author     Gibheer <gibheer+git@zero-knowledge.org>  2024-09-05 19:38:25 +0200
committer  Gibheer <gibheer+git@zero-knowledge.org>  2024-09-05 19:38:25 +0200
commit     6ea4d2c82de80efc87708e5e182034b7c6c2019e (patch)
tree       35c0856a929040216c82153ca62d43b27530a887 /vendor/github.com/jackc/pgx/v5
parent     6f64eeace1b66639b9380b44e88a8d54850a4306 (diff)

switch from github.com/lib/pq to github.com/jackc/pgx/v5 (HEAD, 20240905, master)
lib/pq has been out of maintenance for some time now, so switch to the newer, more active library. It looks like pgx has finally stabilized after a long time.
Diffstat (limited to 'vendor/github.com/jackc/pgx/v5')
-rw-r--r-- vendor/github.com/jackc/pgx/v5/.gitignore | 27
-rw-r--r-- vendor/github.com/jackc/pgx/v5/CHANGELOG.md | 400
-rw-r--r-- vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md | 121
-rw-r--r-- vendor/github.com/jackc/pgx/v5/LICENSE | 22
-rw-r--r-- vendor/github.com/jackc/pgx/v5/README.md | 174
-rw-r--r-- vendor/github.com/jackc/pgx/v5/Rakefile | 18
-rw-r--r-- vendor/github.com/jackc/pgx/v5/batch.go | 433
-rw-r--r-- vendor/github.com/jackc/pgx/v5/conn.go | 1395
-rw-r--r-- vendor/github.com/jackc/pgx/v5/copy_from.go | 276
-rw-r--r-- vendor/github.com/jackc/pgx/v5/doc.go | 194
-rw-r--r-- vendor/github.com/jackc/pgx/v5/extended_query_builder.go | 146
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go | 70
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/pgio/README.md | 6
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go | 6
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/pgio/write.go | 40
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go | 331
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go | 112
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go | 45
-rw-r--r-- vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go | 77
-rw-r--r-- vendor/github.com/jackc/pgx/v5/large_objects.go | 161
-rw-r--r-- vendor/github.com/jackc/pgx/v5/named_args.go | 295
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/README.md | 29
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go | 272
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/config.go | 918
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go | 80
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/defaults.go | 63
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go | 57
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/doc.go | 38
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/errors.go | 248
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go | 139
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/krb5.go | 100
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go | 2346
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/README.md | 7
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go | 51
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go | 58
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go | 67
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go | 76
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go | 51
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go | 72
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go | 75
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go | 75
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/backend.go | 292
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go | 50
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go | 37
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/bind.go | 223
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go | 58
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go | 90
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/close.go | 81
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go | 66
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go | 95
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go | 59
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go | 38
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go | 45
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go | 96
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go | 96
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go | 143
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/describe.go | 80
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/doc.go | 11
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go | 326
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/execute.go | 58
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/flush.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go | 454
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go | 102
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go | 97
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go | 49
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go | 46
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go | 19
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go | 71
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go | 67
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go | 58
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/parse.go | 89
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go | 49
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go | 120
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/query.go | 45
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go | 61
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go | 166
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go | 90
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go | 56
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go | 49
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go | 94
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/sync.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go | 34
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgproto3/trace.go | 416
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/array.go | 460
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go | 405
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/bits.go | 210
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/bool.go | 343
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/box.go | 238
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go | 952
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/bytea.go | 255
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/circle.go | 222
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/composite.go | 602
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/convert.go | 108
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/date.go | 351
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/doc.go | 188
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go | 109
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/float4.go | 319
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/float8.go | 365
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/hstore.go | 486
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/inet.go | 200
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/int.go | 1980
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb | 548
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb | 93
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb | 62
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh | 2
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/interval.go | 301
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/json.go | 230
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go | 129
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/line.go | 225
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/lseg.go | 238
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/ltree.go | 122
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go | 162
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/multirange.go | 443
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/numeric.go | 823
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/path.go | 272
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go | 2031
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go | 226
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/point.go | 266
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/polygon.go | 253
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/qchar.go | 141
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/range.go | 322
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go | 379
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go | 125
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go | 35
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go | 6
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/text.go | 223
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go | 13
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/tid.go | 241
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/time.go | 272
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go | 356
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go | 366
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/uint32.go | 303
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgtype/uuid.go | 281
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go | 52
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/conn.go | 134
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/doc.go | 27
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/pool.go | 717
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/rows.go | 116
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/stat.go | 84
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go | 33
-rw-r--r-- vendor/github.com/jackc/pgx/v5/pgxpool/tx.go | 82
-rw-r--r-- vendor/github.com/jackc/pgx/v5/rows.go | 851
-rw-r--r-- vendor/github.com/jackc/pgx/v5/stdlib/sql.go | 881
-rw-r--r-- vendor/github.com/jackc/pgx/v5/tracer.go | 107
-rw-r--r-- vendor/github.com/jackc/pgx/v5/tx.go | 432
-rw-r--r-- vendor/github.com/jackc/pgx/v5/values.go | 63
152 files changed, 34480 insertions, 0 deletions
diff --git a/vendor/github.com/jackc/pgx/v5/.gitignore b/vendor/github.com/jackc/pgx/v5/.gitignore
new file mode 100644
index 0000000..a2ebbe9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/.gitignore
@@ -0,0 +1,27 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.envrc
+/.testdb
+
+.DS_Store
diff --git a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
new file mode 100644
index 0000000..61b4695
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
@@ -0,0 +1,400 @@
+# 5.6.0 (May 25, 2024)
+
+* Add StrictNamedArgs (Tomas Zahradnicek)
+* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
+* Add SeverityUnlocalized field to PgError / Notice
+* Performance optimization of RowToStructByPos/Name (Zach Olstein)
+* Allow customizing context canceled behavior for pgconn
+* Add ScanLocation to pgtype.Timestamp[tz]Codec
+* Add custom data to pgconn.PgConn
+* Fix ResultReader.Read() to handle nil values
+* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
+* pgconn.SafeToRetry checks for wrapped errors (tjasko)
+* Failed connection attempts include all errors
+* Optimize LargeObject.Read (Mitar)
+* Add tracing for connection acquire and release from pool (ngavinsir)
+* Fix encode driver.Valuer not called when nil
+* Add support for custom JSON marshal and unmarshal (Mitar)
+* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)
+
+# 5.5.5 (March 9, 2024)
+
+Use spaces instead of parentheses for SQL sanitization.
+
+This still solves the problem of negative numbers creating a line comment, but this avoids breaking edge cases such as
+`set foo to $1` where the substitution is taking place in a location where an arbitrary expression is not allowed.
+
+# 5.5.4 (March 4, 2024)
+
+Fix CVE-2024-27304
+
+SQL injection can occur if an attacker can cause a single query or bind message to exceed 4 GB in size. An integer
+overflow in the calculated message size can cause the one large message to be sent as multiple messages under the
+attacker's control.
+
+Thanks to Paul Gerste for reporting this issue.
+
+* Fix behavior of CollectRows to return empty slice if Rows are empty (Felix)
+* Fix simple protocol encoding of json.RawMessage
+* Fix *Pipeline.getResults should close pipeline on error
+* Fix panic in TryFindUnderlyingTypeScanPlan (David Kurman)
+* Fix deallocation of invalidated cached statements in a transaction
+* Handle invalid sslkey file
+* Fix scan float4 into sql.Scanner
+* Fix pgtype.Bits not making copy of data from read buffer. This would cause the data to be corrupted by future reads.
+
+# 5.5.3 (February 3, 2024)
+
+* Fix: prepared statement already exists
+* Improve CopyFrom auto-conversion of text-ish values
+* Add ltree type support (Florent Viel)
+* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
+* Add AppendRows function (Edoardo Spadolini)
+* Optimize convert UUID [16]byte to string (Kirill Malikov)
+* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)
+
+# 5.5.2 (January 13, 2024)
+
+* Allow NamedArgs to start with underscore
+* pgproto3: Maximum message body length support (jeremy.spriet)
+* Upgrade golang.org/x/crypto to v0.17.0
+* Add snake_case support to RowToStructByName (Tikhon Fedulov)
+* Fix: update description cache after exec prepare (James Hartig)
+* Fix: pipeline checks if it is closed (James Hartig and Ryan Fowler)
+* Fix: normalize timeout / context errors during TLS startup (Samuel Stauffer)
+* Add OnPgError for easier centralized error handling (James Hartig)
+
+# 5.5.1 (December 9, 2023)
+
+* Add CopyFromFunc helper function. (robford)
+* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
+* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
+* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
+* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
+* Add pgtype.Numeric.ScanScientific (Eshton Robateau)
+
+# 5.5.0 (November 4, 2023)
+
+* Add CollectExactlyOneRow. (Julien GOTTELAND)
+* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
+* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
+* Statement cache now uses deterministic, stable statement names.
+* database/sql prepared statement names are deterministically generated.
+* Fix: SendBatch wasn't respecting context cancellation.
+* Fix: Timeout error from pipeline is now normalized.
+* Fix: database/sql encoding json.RawMessage to []byte.
+* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
+* stdlib: Use Ping instead of CheckConn in ResetSession
+* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
+
+# 5.4.3 (August 5, 2023)
+
+* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
+* Fix: connect_timeout for sslmode=allow|prefer (smaher-edb)
+* Fix: pgxpool: background health check cannot overflow pool
+* Fix: Check for nil in defer when sending batch (recover properly from panic)
+* Fix: json scan of non-string pointer to pointer
+* Fix: zeronull.Timestamptz should use pgtype.Timestamptz
+* Fix: NewConnsCount was not correctly counting connections created by Acquire directly. (James Hartig)
+* RowTo(AddrOf)StructByPos ignores fields with "-" db tag
+* Optimization: improve text format numeric parsing (horpto)
+
+# 5.4.2 (July 11, 2023)
+
+* Fix: RowScanner errors are fatal to Rows
+* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
+* Hstore text codec internal improvements (Evan Jones)
+* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
+* Fix: Stop background reader as soon as possible.
+* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.
+
+# 5.4.1 (June 18, 2023)
+
+* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
+* Add TxOptions.BeginQuery to allow overriding the default BEGIN query
+
+# 5.4.0 (June 14, 2023)
+
+* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
+* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
+* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
+* CancelRequest: don't try to read the reply (Nicola Murino)
+* Fix: correctly handle bool type aliases (Wichert Akkerman)
+* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
+* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
+* Add BeforeClose to pgxpool.Pool (Evan Cordell)
+* Fix: various hstore fixes and optimizations (Evan Jones)
+* Fix: RowToStructByPos with embedded unexported struct
+* Support different bool string representations (Lev Zakharov)
+* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
+* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
+* Fix: pipeline batch results not closing pipeline when error occurs while reading directly from results instead of using
+ a callback.
+* Fix: scanning a table type into a struct
+* Fix: scan array of record to pointer to slice of struct
+* Fix: handle null for json (Cemre Mengu)
+* Batch Query callback is called even when there is an error
+* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)
+
+# 5.3.1 (February 27, 2023)
+
+* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)
+* Fix: sql.Scanner not being used in certain cases
+* Add text format jsonpath support
+* Fix: fake non-blocking read adaptive wait time
+
+# 5.3.0 (February 11, 2023)
+
+* Fix: json values work with sql.Scanner
+* Fixed / improved error messages (Mark Chambers and Yevgeny Pats)
+* Fix: support scan into single dimensional arrays
+* Fix: MaxConnLifetimeJitter setting actually jitter (Ben Weintraub)
+* Fix: driver.Value representation of bytea should be []byte not string
+* Fix: better handling of unregistered OIDs
+* CopyFrom can use query cache to avoid extra round trip to get OIDs (Alejandro Do Nascimento Mora)
+* Fix: encode to json ignoring driver.Valuer
+* Support sql.Scanner on renamed base type
+* Fix: pgtype.Numeric text encoding of negative numbers (Mark Chambers)
+* Fix: connect with multiple hostnames when one can't be resolved
+* Upgrade puddle to remove dependency on uber/atomic and fix alignment issue on 32-bit platform
+* Fix: scanning json column into **string
+* Multiple reductions in memory allocations
+* Fake non-blocking read adapts its max wait time
+* Improve CopyFrom performance and reduce memory usage
+* Fix: encode []any to array
+* Fix: LoadType for composite with dropped attributes (Felix Röhrich)
+* Support v4 and v5 stdlib in same program
+* Fix: text format array decoding with string of "NULL"
+* Prefer binary format for arrays
+
+# 5.2.0 (December 5, 2022)
+
+* `tracelog.TraceLog` implements the pgx.PrepareTracer interface. (Vitalii Solodilov)
+* Optimize creating begin transaction SQL string (Petr Evdokimov and ksco)
+* `Conn.LoadType` supports range and multirange types (Vitalii Solodilov)
+* Fix scan `uint` and `uint64` `ScanNumeric`. This resolves a PostgreSQL `numeric` being incorrectly scanned into `uint` and `uint64`.
+
+# 5.1.1 (November 17, 2022)
+
+* Fix simple query sanitizer where query text contains a Unicode replacement character.
+* Remove erroneous `name` argument from `DeallocateAll()`. Technically, this is a breaking change, but given that the method was only added 5 days ago this change was accepted. (Bodo Kaiser)
+
+# 5.1.0 (November 12, 2022)
+
+* Update puddle to v2.1.2. This resolves a race condition and a deadlock in pgxpool.
+* `QueryRewriter.RewriteQuery` now returns an error. Technically, this is a breaking change for any external implementers, but given the minimal likelihood that there are actually any external implementers this change was accepted.
+* Expose `GetSSLPassword` support to pgx.
+* Fix encode `ErrorResponse` unknown field handling. This would only affect pgproto3 being used directly as a proxy with a non-PostgreSQL server that included additional error fields.
+* Fix date text format encoding with 5 digit years.
+* Fix date values passed to a `sql.Scanner` as `string` instead of `time.Time`.
+* DateCodec.DecodeValue can return `pgtype.InfinityModifier` instead of `string` for infinite values. This now matches the behavior of the timestamp types.
+* Add domain type support to `Conn.LoadType()`.
+* Add `RowToStructByName` and `RowToAddrOfStructByName`. (Pavlo Golub)
+* Add `Conn.DeallocateAll()` to clear all prepared statements including the statement cache. (Bodo Kaiser)
+
+# 5.0.4 (October 24, 2022)
+
+* Fix: CollectOneRow prefers PostgreSQL error over pgx.ErrorNoRows
+* Fix: some reflect Kind checks to first check for nil
+* Bump golang.org/x/text dependency to placate snyk
+* Fix: RowToStructByPos on structs with multiple anonymous sub-structs (Baptiste Fontaine)
+* Fix: Exec checks if tx is closed
+
+# 5.0.3 (October 14, 2022)
+
+* Fix `driver.Valuer` handling edge cases that could cause infinite loop or crash
+
+# v5.0.2 (October 8, 2022)
+
+* Fix date encoding in text format to always use 2 digits for month and day
+* Prefer driver.Valuer over wrap plans when encoding
+* Fix scan to pointer to pointer to renamed type
+* Allow scanning NULL even if PG and Go types are incompatible
+
+# v5.0.1 (September 24, 2022)
+
+* Fix 32-bit atomic usage
+* Add MarshalJSON for Float8 (yogipristiawan)
+* Add `[` and `]` to text encoding of `Lseg`
+* Fix sqlScannerWrapper NULL handling
+
+# v5.0.0 (September 17, 2022)
+
+## Merged Packages
+
+`github.com/jackc/pgtype`, `github.com/jackc/pgconn`, and `github.com/jackc/pgproto3` are now included in the main
+`github.com/jackc/pgx` repository. Previously there was confusion as to where issues should be reported, additional
+release work due to releasing multiple packages, and less clear changelogs.
+
+## pgconn
+
+`CommandTag` is now an opaque type instead of directly exposing an underlying `[]byte`.
+
+The return value `ResultReader.Values()` is no longer safe to retain a reference to after a subsequent call to `NextRow()` or `Close()`.
+
+`Trace()` method adds low level message tracing similar to the `PQtrace` function in `libpq`.
+
+pgconn now uses non-blocking IO. This is a significant internal restructuring, but it should not cause any visible changes on its own. However, it is important in implementing other new features.
+
+`CheckConn()` checks a connection's liveness by doing a non-blocking read. This can be used to detect database restarts or network interruptions without executing a query or a ping.
+
+pgconn now supports pipeline mode.
+
+`*PgConn.ReceiveResults` removed. Use pipeline mode instead.
+
+`Timeout()` no longer considers `context.Canceled` as a timeout error. `context.DeadlineExceeded` still is considered a timeout error.
+
+## pgxpool
+
+`Connect` and `ConnectConfig` have been renamed to `New` and `NewWithConfig` respectively. The `LazyConnect` option has been removed. Pools always lazily connect.
+
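+A minimal sketch of the renamed constructor (the `DATABASE_URL` environment variable is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"github.com/jackc/pgx/v5/pgxpool"
+)
+
+func newPool(ctx context.Context) (*pgxpool.Pool, error) {
+	// Pools always connect lazily now; there is no LazyConnect option.
+	return pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
+}
+```
+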
+## pgtype
+
+The `pgtype` package has been significantly changed.
+
+### NULL Representation
+
+Previously, types had a `Status` field that could be `Undefined`, `Null`, or `Present`. This has been changed to a
+`Valid` `bool` field to harmonize with how `database/sql` represents `NULL` and to make the zero value usable.
+
+Previously, a type that implemented `driver.Valuer` would have the `Value` method called even on a nil pointer. All nils
+whether typed or untyped now represent `NULL`.
+
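+A minimal sketch of the new representation, using `pgtype.Text` (the query and names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgtype"
+)
+
+func scanNullable(ctx context.Context, conn *pgx.Conn) (pgtype.Text, error) {
+	var s pgtype.Text // the zero value is usable: Valid == false means NULL
+	err := conn.QueryRow(ctx, "select null::text").Scan(&s)
+	return s, err // on success, s.Valid reports whether the value was non-NULL
+}
+```
+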
+### Codec and Value Split
+
+Previously, the type system combined decoding and encoding values with the value types. e.g. Type `Int8` both handled
+encoding and decoding the PostgreSQL representation and acted as a value object. This caused some difficulties when
+there was not an exact 1 to 1 relationship between the Go types and the PostgreSQL types. For example, scanning a
+PostgreSQL binary `numeric` into a Go `float64` was awkward (see https://github.com/jackc/pgtype/issues/147). These
+concepts have been separated. A `Codec` only has responsibility for encoding and decoding values. Value types are
+generally defined by implementing an interface that a particular `Codec` understands (e.g. `PointScanner` and
+`PointValuer` for the PostgreSQL `point` type).
+
+### Array Types
+
+All array types are now handled by `ArrayCodec` instead of using code generation for each new array type. This also
+means that less common array types such as `point[]` are now supported. `Array[T]` supports PostgreSQL multi-dimensional
+arrays.
+
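+For example, a sketch of scanning an array column straight into a Go slice (query and names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+)
+
+func readTags(ctx context.Context, conn *pgx.Conn) ([]string, error) {
+	// ArrayCodec is generic over the element type, so a plain slice scans directly.
+	var tags []string
+	err := conn.QueryRow(ctx, "select array['a','b','c']").Scan(&tags)
+	return tags, err
+}
+```
+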
+### Composite Types
+
+Composite types must be registered before use. `CompositeFields` may still be used to construct and destruct composite
+values, but any type may now implement `CompositeIndexGetter` and `CompositeIndexScanner` to be used as a composite.
+
+### Range Types
+
+Range types are now handled with types `RangeCodec` and `Range[T]`. This allows additional user defined range types to
+easily be handled. Multirange types are handled similarly with `MultirangeCodec` and `Multirange[T]`.
+
+### pgxtype
+
+`LoadDataType` moved to `*Conn` as `LoadType`.
+
+### Bytea
+
+The `Bytea` and `GenericBinary` types have been replaced. Use the following instead:
+
+* `[]byte` - For normal usage directly use `[]byte`.
+* `DriverBytes` - Uses driver memory only available until next database method call. Avoids a copy and an allocation.
+* `PreallocBytes` - Uses preallocated byte slice to avoid an allocation.
+* `UndecodedBytes` - Avoids any decoding. Allows working with raw bytes.
+
+### Dropped lib/pq Support
+
+`pgtype` previously supported and was tested against [lib/pq](https://github.com/lib/pq). While it will continue to work
+in most cases this is no longer supported.
+
+### database/sql Scan
+
+Previously, most `Scan` implementations would convert `[]byte` to `string` automatically to decode a text value. Now
+only `string` is handled. This is to allow the possibility of future binary support in `database/sql` mode by
+considering `[]byte` to be binary format and `string` text format. This change should have no effect for any use with
+`pgx`. The previous behavior was only necessary for `lib/pq` compatibility.
+
+Added `*Map.SQLScanner` to create a `sql.Scanner` for types such as `[]int32` and `Range[T]` that do not implement
+`sql.Scanner` directly.
+
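+A sketch of wrapping a slice with `SQLScanner`, assuming a `*sql.DB` opened through the stdlib adapter:
+
+```go
+package main
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/jackc/pgx/v5/pgtype"
+)
+
+func readIDs(ctx context.Context, db *sql.DB) ([]int32, error) {
+	m := pgtype.NewMap()
+	var ids []int32
+	// []int32 does not implement sql.Scanner itself, so wrap it.
+	err := db.QueryRowContext(ctx, "select array[1,2,3]").Scan(m.SQLScanner(&ids))
+	return ids, err
+}
+```
+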
+### Number Type Fields Include Bit size
+
+`Int2`, `Int4`, `Int8`, `Float4`, `Float8`, and `Uint32` fields now include bit size. e.g. `Int` is renamed to `Int64`.
+This matches the convention set by `database/sql`. In addition, for comparable types like `pgtype.Int8` and
+`sql.NullInt64` the structures are identical. This means they can be directly converted one to another.
+
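+Because the field layouts match, this is a plain Go type conversion, e.g.:
+
+```go
+package main
+
+import (
+	"database/sql"
+
+	"github.com/jackc/pgx/v5/pgtype"
+)
+
+func toNullInt64(v pgtype.Int8) sql.NullInt64 {
+	// Identical fields (Int64 int64; Valid bool) permit a direct conversion.
+	return sql.NullInt64(v)
+}
+```
+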
+### 3rd Party Type Integrations
+
+* Extracted integrations with https://github.com/shopspring/decimal and https://github.com/gofrs/uuid to
+ https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid respectively. This trims
+ the pgx dependency tree.
+
+### Other Changes
+
+* `Bit` and `Varbit` are both replaced by the `Bits` type.
+* `CID`, `OID`, `OIDValue`, and `XID` are replaced by the `Uint32` type.
+* `Hstore` is now defined as `map[string]*string`.
+* `JSON` and `JSONB` types removed. Use `[]byte` or `string` directly.
+* `QChar` type removed. Use `rune` or `byte` directly.
+* `Inet` and `Cidr` types removed. Use `netip.Addr` and `netip.Prefix` directly. These types are more memory efficient than the previous `net.IPNet`.
+* `Macaddr` type removed. Use `net.HardwareAddr` directly.
+* Renamed `pgtype.ConnInfo` to `pgtype.Map`.
+* Renamed `pgtype.DataType` to `pgtype.Type`.
+* Renamed `pgtype.None` to `pgtype.Finite`.
+* `RegisterType` now accepts a `*Type` instead of `Type`.
+* Assorted array helper methods and types made private.
+
+## stdlib
+
+* Removed `AcquireConn` and `ReleaseConn` as that functionality has been built in since Go 1.13.
+
+## Reduced Memory Usage by Reusing Read Buffers
+
+Previously, the connection read buffer would allocate large chunks of memory and never reuse them. This allowed
+transferring ownership to anything such as scanned values without incurring an additional allocation and memory copy.
+However, this came at the cost of overall increased memory allocation size. But worse it was also possible to pin large
+chunks of memory by retaining a reference to a small value that originally came directly from the read buffer. Now
+ownership remains with the read buffer and anything needing to retain a value must make a copy.
+
+## Query Execution Modes
+
+Control over automatic prepared statement caching and simple protocol use are now combined into query execution mode.
+See documentation for `QueryExecMode`.
+
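+A sketch of choosing a mode up front, e.g. to avoid prepared statements behind PgBouncer (the env var is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"github.com/jackc/pgx/v5"
+)
+
+func connectSimple(ctx context.Context) (*pgx.Conn, error) {
+	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
+	if err != nil {
+		return nil, err
+	}
+	cfg.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol // no automatic prepared statements
+	return pgx.ConnectConfig(ctx, cfg)
+}
+```
+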
+## QueryRewriter Interface and NamedArgs
+
+pgx now supports named arguments with the `NamedArgs` type. This is implemented via the new `QueryRewriter` interface which
+allows arbitrary rewriting of query SQL and arguments.
+
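+A sketch of a named-arguments query (table and column names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+)
+
+func findWidget(ctx context.Context, conn *pgx.Conn) (string, error) {
+	var name string
+	// NamedArgs rewrites @id into a positional parameter before execution.
+	err := conn.QueryRow(ctx,
+		"select name from widgets where id = @id",
+		pgx.NamedArgs{"id": int32(42)},
+	).Scan(&name)
+	return name, err
+}
+```
+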
+## RowScanner Interface
+
+The `RowScanner` interface allows a single argument to Rows.Scan to scan the entire row.
+
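+A sketch of a type implementing the interface (the struct is illustrative):
+
+```go
+package main
+
+import "github.com/jackc/pgx/v5"
+
+// widget can be passed as the single argument to Rows.Scan.
+type widget struct {
+	ID   int32
+	Name string
+}
+
+func (w *widget) ScanRow(rows pgx.Rows) error {
+	return rows.Scan(&w.ID, &w.Name)
+}
+```
+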
+## Rows Result Helpers
+
+* `CollectRows` and `RowTo*` functions simplify collecting results into a slice (see the sketch after this list).
+* `CollectOneRow` collects one row using `RowTo*` functions.
+* `ForEachRow` simplifies scanning each row and executing code using the scanned values. `ForEachRow` replaces `QueryFunc`.
+
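+A sketch of the `CollectRows` helper mentioned above (table and struct are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+)
+
+type item struct {
+	ID   int32
+	Name string
+}
+
+func loadItems(ctx context.Context, conn *pgx.Conn) ([]item, error) {
+	rows, _ := conn.Query(ctx, "select id, name from items")
+	// CollectRows closes rows and surfaces any query or scan error.
+	return pgx.CollectRows(rows, pgx.RowToStructByName[item])
+}
+```
+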
+## Tx Helpers
+
+Rather than every type that implemented `Begin` or `BeginTx` methods also needing to implement `BeginFunc` and
+`BeginTxFunc` these methods have been converted to functions that take a db that implements `Begin` or `BeginTx`.
+
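+A sketch of the function form (the table is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+)
+
+func debit(ctx context.Context, conn *pgx.Conn) error {
+	// BeginFunc commits when fn returns nil and rolls back otherwise.
+	return pgx.BeginFunc(ctx, conn, func(tx pgx.Tx) error {
+		_, err := tx.Exec(ctx, "update accounts set balance = balance - 10 where id = $1", 1)
+		return err
+	})
+}
+```
+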
+## Improved Batch Query Ergonomics
+
+Previously, the code for building a batch went in one place before the call to `SendBatch`, and the code for reading the
+results went in one place after the call to `SendBatch`. This could make it difficult to match up the query and the code
+to handle the results. Now `Queue` returns a `QueuedQuery` which has methods `Query`, `QueryRow`, and `Exec` which can
+be used to register a callback function that will handle the result. Callback functions are called automatically when
+`BatchResults.Close` is called.
+
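+A sketch of the callback style (queries and names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/jackc/pgx/v5"
+	"github.com/jackc/pgx/v5/pgconn"
+)
+
+func runBatch(ctx context.Context, conn *pgx.Conn) error {
+	b := &pgx.Batch{}
+
+	var n int64
+	b.Queue("select count(*) from widgets").QueryRow(func(row pgx.Row) error {
+		return row.Scan(&n)
+	})
+
+	b.Queue("update widgets set active = true where id = $1", 42).Exec(func(ct pgconn.CommandTag) error {
+		_ = ct.RowsAffected() // result handling sits next to the query that produced it
+		return nil
+	})
+
+	// Callbacks run as the results are read during Close.
+	return conn.SendBatch(ctx, b).Close()
+}
+```
+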
+## SendBatch Uses Pipeline Mode When Appropriate
+
+Previously, a batch with 10 unique parameterized statements executed 100 times would entail 11 network round trips: 1
+for each prepare / describe and 1 for executing them all. Now pipeline mode is used to prepare / describe all statements
+in a single network round trip, so it only takes 2 round trips.
+
+## Tracing and Logging
+
+Internal logging support has been replaced with tracing hooks. This allows custom tracing integration with tools like OpenTelemetry. Package tracelog provides an adapter for pgx v4 loggers to act as a tracer.
+
+All integrations with 3rd party loggers have been extracted to separate repositories. This trims the pgx dependency
+tree.
diff --git a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
new file mode 100644
index 0000000..c975a93
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributing
+
+## Discuss Significant Changes
+
+Before you invest a significant amount of time on a change, please create a discussion or issue describing your
+proposal. This will help to ensure your proposed change has a reasonable chance of being merged.
+
+## Avoid Dependencies
+
+Adding a dependency is a big deal. While on occasion a new dependency may be accepted, the default answer to any change
+that adds a dependency is no.
+
+## Development Environment Setup
+
+pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE`
+environment variable. The `PGX_TEST_DATABASE` environment variable can either be a URL or key-value pairs. In addition,
+the standard `PG*` environment variables will be respected. Consider using [direnv](https://github.com/direnv/direnv) to
+simplify environment variable handling.
+
+### Using an Existing PostgreSQL Cluster
+
+If you already have a PostgreSQL development server this is the quickest way to start and run the majority of the pgx
+test suite. Some tests will be skipped that require server configuration changes (e.g. those testing different
+authentication methods).
+
+Create and setup a test database:
+
+```
+export PGDATABASE=pgx_test
+createdb
+psql -c 'create extension hstore;'
+psql -c 'create extension ltree;'
+psql -c 'create domain uint64 as numeric(20,0);'
+```
+
+Ensure a `postgres` user exists. This happens by default in normal PostgreSQL installs, but some installation methods
+such as Homebrew do not.
+
+```
+createuser -s postgres
+```
+
+Ensure your `PGX_TEST_DATABASE` environment variable points to the database you just created and run the tests.
+
+```
+export PGX_TEST_DATABASE="host=/private/tmp database=pgx_test"
+go test ./...
+```
+
+This will run the vast majority of the tests, but some tests will be skipped (e.g. those testing different connection methods).
+
+### Creating a New PostgreSQL Cluster Exclusively for Testing
+
+The following environment variables need to be set both for initial setup and whenever the tests are run. (direnv is
+highly recommended). Depending on your platform, you may need to change the host for `PGX_TEST_UNIX_SOCKET_CONN_STRING`.
+
+```
+export PGPORT=5015
+export PGUSER=postgres
+export PGDATABASE=pgx_test
+export POSTGRESQL_DATA_DIR=postgresql
+
+export PGX_TEST_DATABASE="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_UNIX_SOCKET_CONN_STRING="host=/private/tmp database=pgx_test"
+export PGX_TEST_TCP_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_SCRAM_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_scram password=secret database=pgx_test"
+export PGX_TEST_MD5_PASSWORD_CONN_STRING="host=127.0.0.1 database=pgx_test user=pgx_md5 password=secret"
+export PGX_TEST_PLAIN_PASSWORD_CONN_STRING="host=127.0.0.1 user=pgx_pw password=secret"
+export PGX_TEST_TLS_CONN_STRING="host=localhost user=pgx_ssl password=secret sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem"
+export PGX_SSL_PASSWORD=certpw
+export PGX_TEST_TLS_CLIENT_CONN_STRING="host=localhost user=pgx_sslcert sslmode=verify-full sslrootcert=`pwd`/.testdb/ca.pem database=pgx_test sslcert=`pwd`/.testdb/pgx_sslcert.crt sslkey=`pwd`/.testdb/pgx_sslcert.key"
+```
+
+Create a new database cluster.
+
+```
+initdb --locale=en_US -E UTF-8 --username=postgres .testdb/$POSTGRESQL_DATA_DIR
+
+echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
+cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf
+
+cd .testdb
+
+# Generate CA, server, and encrypted client certificates.
+go run ../testsetup/generate_certs.go
+
+# Copy certificates to server directory and set permissions.
+cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
+cp localhost.key $POSTGRESQL_DATA_DIR/server.key
+chmod 600 $POSTGRESQL_DATA_DIR/server.key
+cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt
+
+cd ..
+```
+
+
+Start the new cluster. This will be necessary whenever you are running pgx tests.
+
+```
+postgres -D .testdb/$POSTGRESQL_DATA_DIR
+```
+
+Setup the test database in the new cluster.
+
+```
+createdb
+psql --no-psqlrc -f testsetup/postgresql_setup.sql
+```
+
+### PgBouncer
+
+There are tests specific for PgBouncer that will be executed if `PGX_TEST_PGBOUNCER_CONN_STRING` is set.
+
+### Optional Tests
+
+pgx supports multiple connection types and means of authentication. These tests are optional. They will only run if the
+appropriate environment variables are set. In addition, there may be tests specific to particular PostgreSQL versions,
+non-PostgreSQL servers (e.g. CockroachDB), or connection poolers (e.g. PgBouncer). Run `go test ./... -v | grep SKIP`
+to see if any tests are being skipped.
diff --git a/vendor/github.com/jackc/pgx/v5/LICENSE b/vendor/github.com/jackc/pgx/v5/LICENSE
new file mode 100644
index 0000000..5c486c3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013-2021 Jack Christensen
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jackc/pgx/v5/README.md b/vendor/github.com/jackc/pgx/v5/README.md
new file mode 100644
index 0000000..0cf2c29
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/README.md
@@ -0,0 +1,174 @@
+[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgx/v5.svg)](https://pkg.go.dev/github.com/jackc/pgx/v5)
+[![Build Status](https://github.com/jackc/pgx/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgx/actions/workflows/ci.yml)
+
+# pgx - PostgreSQL Driver and Toolkit
+
+pgx is a pure Go driver and toolkit for PostgreSQL.
+
+The pgx driver is a low-level, high performance interface that exposes PostgreSQL-specific features such as `LISTEN` /
+`NOTIFY` and `COPY`. It also includes an adapter for the standard `database/sql` interface.
+
+The toolkit component is a related set of packages that implement PostgreSQL functionality such as parsing the wire protocol
+and type mapping between PostgreSQL and Go. These underlying packages can be used to implement alternative drivers,
+proxies, load balancers, logical replication clients, etc.
+
+## Example Usage
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/jackc/pgx/v5"
+)
+
+func main() {
+ // urlExample := "postgres://username:password@localhost:5432/database_name"
+ conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
+ os.Exit(1)
+ }
+ defer conn.Close(context.Background())
+
+ var name string
+ var weight int64
+ err = conn.QueryRow(context.Background(), "select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err)
+ os.Exit(1)
+ }
+
+ fmt.Println(name, weight)
+}
+```
+
+See the [getting started guide](https://github.com/jackc/pgx/wiki/Getting-started-with-pgx) for more information.
+
+## Features
+
+* Support for approximately 70 different PostgreSQL types
+* Automatic statement preparation and caching
+* Batch queries
+* Single-round trip query mode
+* Full TLS connection control
+* Binary format support for custom types (allows for much quicker encoding/decoding)
+* `COPY` protocol support for faster bulk data loads
+* Tracing and logging support
+* Connection pool with after-connect hook for arbitrary connection setup
+* `LISTEN` / `NOTIFY`
+* Conversion of PostgreSQL arrays to Go slice mappings for integers, floats, and strings
+* `hstore` support
+* `json` and `jsonb` support
+* Maps `inet` and `cidr` PostgreSQL types to `netip.Addr` and `netip.Prefix`
+* Large object support
+* NULL mapping to pointer to pointer
+* Supports `database/sql.Scanner` and `database/sql/driver.Valuer` interfaces for custom types
+* Notice response handling
+* Simulated nested transactions with savepoints
+
+## Choosing Between the pgx and database/sql Interfaces
+
+The pgx interface is faster. Many PostgreSQL specific features such as `LISTEN` / `NOTIFY` and `COPY` are not available
+through the `database/sql` interface.
+
+The pgx interface is recommended when:
+
+1. The application only targets PostgreSQL.
+2. No other libraries that require `database/sql` are in use.
+
+It is also possible to use the `database/sql` interface and convert a connection to the lower-level pgx interface as needed.
+
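+For example, a sketch using the stdlib adapter to reach the underlying `*pgx.Conn` (names are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/jackc/pgx/v5/stdlib"
+)
+
+func withPgxConn(ctx context.Context, db *sql.DB) error {
+	sqlConn, err := db.Conn(ctx)
+	if err != nil {
+		return err
+	}
+	defer sqlConn.Close()
+
+	return sqlConn.Raw(func(driverConn any) error {
+		pgxConn := driverConn.(*stdlib.Conn).Conn() // the lower-level *pgx.Conn
+		_, err := pgxConn.Exec(ctx, "listen events")
+		return err
+	})
+}
+```
+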
+## Testing
+
+See CONTRIBUTING.md for setup instructions.
+
+## Architecture
+
+See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
+
+## Supported Go and PostgreSQL Versions
+
+pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.21 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
+
+## Version Policy
+
+pgx follows semantic versioning for the documented public API on stable releases. `v5` is the latest stable major version.
+
+## PGX Family Libraries
+
+### [github.com/jackc/pglogrepl](https://github.com/jackc/pglogrepl)
+
+pglogrepl provides functionality to act as a client for PostgreSQL logical replication.
+
+### [github.com/jackc/pgmock](https://github.com/jackc/pgmock)
+
+pgmock offers the ability to create a server that mocks the PostgreSQL wire protocol. This is used internally to test pgx by purposely inducing unusual errors. pgproto3 and pgmock together provide most of the foundational tooling required to implement a PostgreSQL proxy or MitM (such as for a custom connection pooler).
+
+### [github.com/jackc/tern](https://github.com/jackc/tern)
+
+tern is a stand-alone SQL migration system.
+
+### [github.com/jackc/pgerrcode](https://github.com/jackc/pgerrcode)
+
+pgerrcode contains constants for the PostgreSQL error codes.
+
+## Adapters for 3rd Party Types
+
+* [github.com/jackc/pgx-gofrs-uuid](https://github.com/jackc/pgx-gofrs-uuid)
+* [github.com/jackc/pgx-shopspring-decimal](https://github.com/jackc/pgx-shopspring-decimal)
+* [github.com/twpayne/pgx-geos](https://github.com/twpayne/pgx-geos) ([PostGIS](https://postgis.net/) and [GEOS](https://libgeos.org/) via [go-geos](https://github.com/twpayne/go-geos))
+* [github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
+
+
+## Adapters for 3rd Party Tracers
+
+* [https://github.com/jackhopner/pgx-xray-tracer](https://github.com/jackhopner/pgx-xray-tracer)
+
+## Adapters for 3rd Party Loggers
+
+These adapters can be used with the tracelog package.
+
+* [github.com/jackc/pgx-go-kit-log](https://github.com/jackc/pgx-go-kit-log)
+* [github.com/jackc/pgx-log15](https://github.com/jackc/pgx-log15)
+* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
+* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
+* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
+* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
+* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)
+
+## 3rd Party Libraries with PGX Support
+
+### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)
+
+pgxmock is a mock library implementing pgx interfaces.
+pgxmock has one and only one purpose - to simulate pgx behavior in tests, without needing a real database connection.
+
+### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
+
+Library for scanning data from a database into Go structs and more.
+
+### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)
+
+A carefully designed SQL client that makes using SQL easier,
+more productive, and less error-prone in Go.
+
+### [https://github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
+
+Adds GSSAPI / Kerberos authentication support.
+
+### [github.com/wcamarao/pmx](https://github.com/wcamarao/pmx)
+
+Explicit data mapping and scanning library for Go structs and slices.
+
+### [github.com/stephenafamo/scan](https://github.com/stephenafamo/scan)
+
+Type safe and flexible package for scanning database data into Go types.
+Supports structs, maps, slices, and custom mapping functions.
+
+### [https://github.com/z0ne-dev/mgx](https://github.com/z0ne-dev/mgx)
+
+Code first migration library for native pgx (no database/sql abstraction).
diff --git a/vendor/github.com/jackc/pgx/v5/Rakefile b/vendor/github.com/jackc/pgx/v5/Rakefile
new file mode 100644
index 0000000..d957573
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/Rakefile
@@ -0,0 +1,18 @@
+require "erb"
+
+rule '.go' => '.go.erb' do |task|
+ erb = ERB.new(File.read(task.source))
+ File.write(task.name, "// Do not edit. Generated from #{task.source}\n" + erb.result(binding))
+ sh "goimports", "-w", task.name
+end
+
+generated_code_files = [
+ "pgtype/int.go",
+ "pgtype/int_test.go",
+ "pgtype/integration_benchmark_test.go",
+ "pgtype/zeronull/int.go",
+ "pgtype/zeronull/int_test.go"
+]
+
+desc "Generate code"
+task generate: generated_code_files
diff --git a/vendor/github.com/jackc/pgx/v5/batch.go b/vendor/github.com/jackc/pgx/v5/batch.go
new file mode 100644
index 0000000..3540f57
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/batch.go
@@ -0,0 +1,433 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// QueuedQuery is a query that has been queued for execution via a Batch.
+type QueuedQuery struct {
+ SQL string
+ Arguments []any
+ Fn batchItemFunc
+ sd *pgconn.StatementDescription
+}
+
+type batchItemFunc func(br BatchResults) error
+
+// Query sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
+ qq.Fn = func(br BatchResults) error {
+ rows, _ := br.Query()
+ defer rows.Close()
+
+ err := fn(rows)
+ if err != nil {
+ return err
+ }
+ rows.Close()
+
+ return rows.Err()
+ }
+}
+
+// QueryRow sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
+ qq.Fn = func(br BatchResults) error {
+ row := br.QueryRow()
+ return fn(row)
+ }
+}
+
+// Exec sets fn to be called when the response to qq is received.
+func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
+ qq.Fn = func(br BatchResults) error {
+ ct, err := br.Exec()
+ if err != nil {
+ return err
+ }
+
+ return fn(ct)
+ }
+}
+
+// Batch queries are a way of bundling multiple queries together to avoid
+// unnecessary network round trips. A Batch must only be sent once.
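+//
+// A sketch of typical use (illustrative; conn and ctx are assumed to be in scope):
+//
+//	b := &Batch{}
+//	b.Queue("insert into t(a) values($1)", 1)
+//	b.Queue("select count(*) from t").QueryRow(func(row Row) error {
+//		var n int64
+//		return row.Scan(&n)
+//	})
+//	err := conn.SendBatch(ctx, b).Close()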
+type Batch struct {
+ QueuedQueries []*QueuedQuery
+}
+
+// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
+// The only pgx option argument that is supported is QueryRewriter. Queries are executed using the
+// connection's DefaultQueryExecMode.
+func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
+ qq := &QueuedQuery{
+ SQL: query,
+ Arguments: arguments,
+ }
+ b.QueuedQueries = append(b.QueuedQueries, qq)
+ return qq
+}
+
+// Len returns number of queries that have been queued so far.
+func (b *Batch) Len() int {
+ return len(b.QueuedQueries)
+}
+
+type BatchResults interface {
+ // Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. Prefer
+ // calling Exec on the QueuedQuery.
+ Exec() (pgconn.CommandTag, error)
+
+ // Query reads the results from the next query in the batch as if the query has been sent with Conn.Query. Prefer
+ // calling Query on the QueuedQuery.
+ Query() (Rows, error)
+
+ // QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.
+ // Prefer calling QueryRow on the QueuedQuery.
+ QueryRow() Row
+
+ // Close closes the batch operation. All unread results are read and any callback functions registered with
+ // QueuedQuery.Query, QueuedQuery.QueryRow, or QueuedQuery.Exec will be called. If a callback function returns an
+ // error or the batch encounters an error subsequent callback functions will not be called.
+ //
+ // Close must be called before the underlying connection can be used again. Any error that occurred during a batch
+	// operation may have made it impossible to resynchronize the connection with the server. In this case the underlying
+ // connection will have been closed.
+ //
+ // Close is safe to call multiple times. If it returns an error subsequent calls will return the same error. Callback
+ // functions will not be rerun.
+ Close() error
+}
+
+type batchResults struct {
+ ctx context.Context
+ conn *Conn
+ mrr *pgconn.MultiResultReader
+ err error
+ b *Batch
+ qqIdx int
+ closed bool
+ endTraced bool
+}
+
+// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
+func (br *batchResults) Exec() (pgconn.CommandTag, error) {
+ if br.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+ if br.closed {
+ return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
+ }
+
+ query, arguments, _ := br.nextQueryAndArgs()
+
+ if !br.mrr.NextResult() {
+ err := br.mrr.Close()
+ if err == nil {
+ err = errors.New("no result")
+ }
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: err,
+ })
+ }
+ return pgconn.CommandTag{}, err
+ }
+
+ commandTag, err := br.mrr.ResultReader().Close()
+ if err != nil {
+ br.err = err
+ br.mrr.Close()
+ }
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ CommandTag: commandTag,
+ Err: br.err,
+ })
+ }
+
+ return commandTag, br.err
+}
+
+// Query reads the results from the next query in the batch as if the query has been sent with Query.
+func (br *batchResults) Query() (Rows, error) {
+ query, arguments, ok := br.nextQueryAndArgs()
+ if !ok {
+ query = "batch query"
+ }
+
+ if br.err != nil {
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ if br.closed {
+ alreadyClosedErr := fmt.Errorf("batch already closed")
+ return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
+ }
+
+ rows := br.conn.getRows(br.ctx, query, arguments)
+ rows.batchTracer = br.conn.batchTracer
+
+ if !br.mrr.NextResult() {
+ rows.err = br.mrr.Close()
+ if rows.err == nil {
+ rows.err = errors.New("no result")
+ }
+ rows.closed = true
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: rows.err,
+ })
+ }
+
+ return rows, rows.err
+ }
+
+ rows.resultReader = br.mrr.ResultReader()
+ return rows, nil
+}
+
+// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
+func (br *batchResults) QueryRow() Row {
+ rows, _ := br.Query()
+	return (*connRow)(rows.(*baseRows))
+}
+
+// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
+// resynchronize the connection with the server. In this case the underlying connection will have been closed.
+func (br *batchResults) Close() error {
+ defer func() {
+ if !br.endTraced {
+ if br.conn != nil && br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
+ }
+ br.endTraced = true
+ }
+ }()
+
+ if br.err != nil {
+ return br.err
+ }
+
+ if br.closed {
+ return nil
+ }
+
+ // Read and run fn for all remaining items
+ for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ if br.b.QueuedQueries[br.qqIdx].Fn != nil {
+ err := br.b.QueuedQueries[br.qqIdx].Fn(br)
+ if err != nil {
+ br.err = err
+ }
+ } else {
+ br.Exec()
+ }
+ }
+
+ br.closed = true
+
+ err := br.mrr.Close()
+ if br.err == nil {
+ br.err = err
+ }
+
+ return br.err
+}
+
+func (br *batchResults) earlyError() error {
+ return br.err
+}
+
+func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
+ if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ bi := br.b.QueuedQueries[br.qqIdx]
+ query = bi.SQL
+ args = bi.Arguments
+ ok = true
+ br.qqIdx++
+ }
+ return
+}
+
+type pipelineBatchResults struct {
+ ctx context.Context
+ conn *Conn
+ pipeline *pgconn.Pipeline
+ lastRows *baseRows
+ err error
+ b *Batch
+ qqIdx int
+ closed bool
+ endTraced bool
+}
+
+// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
+func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
+ if br.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+ if br.closed {
+ return pgconn.CommandTag{}, fmt.Errorf("batch already closed")
+ }
+ if br.lastRows != nil && br.lastRows.err != nil {
+ return pgconn.CommandTag{}, br.err
+ }
+
+ query, arguments, _ := br.nextQueryAndArgs()
+
+ results, err := br.pipeline.GetResults()
+ if err != nil {
+ br.err = err
+ return pgconn.CommandTag{}, br.err
+ }
+ var commandTag pgconn.CommandTag
+ switch results := results.(type) {
+ case *pgconn.ResultReader:
+ commandTag, br.err = results.Close()
+ default:
+ return pgconn.CommandTag{}, fmt.Errorf("unexpected pipeline result: %T", results)
+ }
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ CommandTag: commandTag,
+ Err: br.err,
+ })
+ }
+
+ return commandTag, br.err
+}
+
+// Query reads the results from the next query in the batch as if the query has been sent with Query.
+func (br *pipelineBatchResults) Query() (Rows, error) {
+ if br.err != nil {
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ if br.closed {
+ alreadyClosedErr := fmt.Errorf("batch already closed")
+ return &baseRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
+ }
+
+ if br.lastRows != nil && br.lastRows.err != nil {
+ br.err = br.lastRows.err
+ return &baseRows{err: br.err, closed: true}, br.err
+ }
+
+ query, arguments, ok := br.nextQueryAndArgs()
+ if !ok {
+ query = "batch query"
+ }
+
+ rows := br.conn.getRows(br.ctx, query, arguments)
+ rows.batchTracer = br.conn.batchTracer
+ br.lastRows = rows
+
+ results, err := br.pipeline.GetResults()
+ if err != nil {
+ br.err = err
+ rows.err = err
+ rows.closed = true
+
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
+ SQL: query,
+ Args: arguments,
+ Err: err,
+ })
+ }
+ } else {
+ switch results := results.(type) {
+ case *pgconn.ResultReader:
+ rows.resultReader = results
+ default:
+ err = fmt.Errorf("unexpected pipeline result: %T", results)
+ br.err = err
+ rows.err = err
+ rows.closed = true
+ }
+ }
+
+ return rows, rows.err
+}
+
+// QueryRow reads the results from the next query in the batch as if the query has been sent with QueryRow.
+func (br *pipelineBatchResults) QueryRow() Row {
+ rows, _ := br.Query()
+	return (*connRow)(rows.(*baseRows))
+}
+
+// Close closes the batch operation. Any error that occurred during a batch operation may have made it impossible to
+// resynchronize the connection with the server. In this case the underlying connection will have been closed.
+func (br *pipelineBatchResults) Close() error {
+ defer func() {
+ if !br.endTraced {
+ if br.conn.batchTracer != nil {
+ br.conn.batchTracer.TraceBatchEnd(br.ctx, br.conn, TraceBatchEndData{Err: br.err})
+ }
+ br.endTraced = true
+ }
+ }()
+
+ if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
+ br.err = br.lastRows.err
+ return br.err
+ }
+
+ if br.closed {
+ return br.err
+ }
+
+ // Read and run fn for all remaining items
+ for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ if br.b.QueuedQueries[br.qqIdx].Fn != nil {
+ err := br.b.QueuedQueries[br.qqIdx].Fn(br)
+ if err != nil {
+ br.err = err
+ }
+ } else {
+ br.Exec()
+ }
+ }
+
+ br.closed = true
+
+ err := br.pipeline.Close()
+ if br.err == nil {
+ br.err = err
+ }
+
+ return br.err
+}
+
+func (br *pipelineBatchResults) earlyError() error {
+ return br.err
+}
+
+func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
+ if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+ bi := br.b.QueuedQueries[br.qqIdx]
+ query = bi.SQL
+ args = bi.Arguments
+ ok = true
+ br.qqIdx++
+ }
+ return
+}
diff --git a/vendor/github.com/jackc/pgx/v5/conn.go b/vendor/github.com/jackc/pgx/v5/conn.go
new file mode 100644
index 0000000..3117214
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/conn.go
@@ -0,0 +1,1395 @@
+package pgx
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/sanitize"
+ "github.com/jackc/pgx/v5/internal/stmtcache"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// ConnConfig contains all the options used to establish a connection. It must be created by ParseConfig and
+// then it can be modified. A manually initialized ConnConfig will cause ConnectConfig to panic.
+type ConnConfig struct {
+ pgconn.Config
+
+ Tracer QueryTracer
+
+ // Original connection string that was parsed into config.
+ connString string
+
+ // StatementCacheCapacity is maximum size of the statement cache used when executing a query with "cache_statement"
+ // query exec mode.
+ StatementCacheCapacity int
+
+ // DescriptionCacheCapacity is the maximum size of the description cache used when executing a query with
+ // "cache_describe" query exec mode.
+ DescriptionCacheCapacity int
+
+ // DefaultQueryExecMode controls the default mode for executing queries. By default pgx uses the extended protocol
+ // and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as
+ // PGBouncer. In this case it may be preferable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
+ // functionality can be controlled on a per query basis by passing a QueryExecMode as the first query argument.
+ DefaultQueryExecMode QueryExecMode
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// ParseConfigOptions contains options that control how a config is built such as getsslpassword.
+type ParseConfigOptions struct {
+ pgconn.ParseConfigOptions
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the tls.Config:
+// according to the tls.Config docs it must not be modified after creation.
+func (cc *ConnConfig) Copy() *ConnConfig {
+ newConfig := new(ConnConfig)
+ *newConfig = *cc
+ newConfig.Config = *newConfig.Config.Copy()
+ return newConfig
+}
+
+// ConnString returns the connection string as parsed by pgx.ParseConfig into pgx.ConnConfig.
+func (cc *ConnConfig) ConnString() string { return cc.connString }
+
+// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage. Use a connection pool to manage access
+// to multiple database connections from multiple goroutines.
+type Conn struct {
+ pgConn *pgconn.PgConn
+ config *ConnConfig // config used when establishing this connection
+ preparedStatements map[string]*pgconn.StatementDescription
+ statementCache stmtcache.Cache
+ descriptionCache stmtcache.Cache
+
+ queryTracer QueryTracer
+ batchTracer BatchTracer
+ copyFromTracer CopyFromTracer
+ prepareTracer PrepareTracer
+
+ notifications []*pgconn.Notification
+
+ doneChan chan struct{}
+ closedChan chan error
+
+ typeMap *pgtype.Map
+
+ wbuf []byte
+ eqb ExtendedQueryBuilder
+}
+
+// Identifier represents a PostgreSQL identifier or name. Identifiers can be composed of
+// multiple parts such as ["schema", "table"] or ["table", "column"].
+type Identifier []string
+
+// Sanitize returns a sanitized string safe for SQL interpolation.
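+// For example (values are illustrative):
+//
+//	Identifier{"public", `my "table"`}.Sanitize()
+//	// => `"public"."my ""table"""`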
+func (ident Identifier) Sanitize() string {
+ parts := make([]string, len(ident))
+ for i := range ident {
+ s := strings.ReplaceAll(ident[i], string([]byte{0}), "")
+ parts[i] = `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
+ }
+ return strings.Join(parts, ".")
+}
+
+var (
+ // ErrNoRows occurs when rows are expected but none are returned.
+ ErrNoRows = errors.New("no rows in result set")
+ // ErrTooManyRows occurs when more rows than expected are returned.
+ ErrTooManyRows = errors.New("too many rows in result set")
+)
+
+var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
+var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+
+// Connect establishes a connection with a PostgreSQL server with a connection string. See
+// pgconn.Connect for details.
+func Connect(ctx context.Context, connString string) (*Conn, error) {
+ connConfig, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+ return connect(ctx, connConfig)
+}
+
+// ConnectWithOptions behaves exactly like Connect with the addition of options. At present, options is only used to
+// provide a GetSSLPassword function.
+func ConnectWithOptions(ctx context.Context, connString string, options ParseConfigOptions) (*Conn, error) {
+ connConfig, err := ParseConfigWithOptions(connString, options)
+ if err != nil {
+ return nil, err
+ }
+ return connect(ctx, connConfig)
+}
+
+// ConnectConfig establishes a connection with a PostgreSQL server with a configuration struct.
+// connConfig must have been created by ParseConfig.
+func ConnectConfig(ctx context.Context, connConfig *ConnConfig) (*Conn, error) {
+	// In general this improves safety. In particular it prevents the config.Config.OnNotification mutation from affecting other
+ // connections with the same config. See https://github.com/jackc/pgx/issues/618.
+ connConfig = connConfig.Copy()
+
+ return connect(ctx, connConfig)
+}
+
+// ParseConfigWithOptions behaves exactly as ParseConfig does with the addition of options. At present, options is
+// only used to provide a GetSSLPassword function.
+func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*ConnConfig, error) {
+ config, err := pgconn.ParseConfigWithOptions(connString, options.ParseConfigOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ statementCacheCapacity := 512
+ if s, ok := config.RuntimeParams["statement_cache_capacity"]; ok {
+ delete(config.RuntimeParams, "statement_cache_capacity")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse statement_cache_capacity: %w", err)
+ }
+ statementCacheCapacity = int(n)
+ }
+
+ descriptionCacheCapacity := 512
+ if s, ok := config.RuntimeParams["description_cache_capacity"]; ok {
+ delete(config.RuntimeParams, "description_cache_capacity")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse description_cache_capacity: %w", err)
+ }
+ descriptionCacheCapacity = int(n)
+ }
+
+ defaultQueryExecMode := QueryExecModeCacheStatement
+ if s, ok := config.RuntimeParams["default_query_exec_mode"]; ok {
+ delete(config.RuntimeParams, "default_query_exec_mode")
+ switch s {
+ case "cache_statement":
+ defaultQueryExecMode = QueryExecModeCacheStatement
+ case "cache_describe":
+ defaultQueryExecMode = QueryExecModeCacheDescribe
+ case "describe_exec":
+ defaultQueryExecMode = QueryExecModeDescribeExec
+ case "exec":
+ defaultQueryExecMode = QueryExecModeExec
+ case "simple_protocol":
+ defaultQueryExecMode = QueryExecModeSimpleProtocol
+ default:
+ return nil, fmt.Errorf("invalid default_query_exec_mode: %s", s)
+ }
+ }
+
+ connConfig := &ConnConfig{
+ Config: *config,
+ createdByParseConfig: true,
+ StatementCacheCapacity: statementCacheCapacity,
+ DescriptionCacheCapacity: descriptionCacheCapacity,
+ DefaultQueryExecMode: defaultQueryExecMode,
+ connString: connString,
+ }
+
+ return connConfig, nil
+}
+
+// ParseConfig creates a ConnConfig from a connection string. ParseConfig handles all options that [pgconn.ParseConfig]
+// does. In addition, it accepts the following options:
+//
+// - default_query_exec_mode.
+// Possible values: "cache_statement", "cache_describe", "describe_exec", "exec", and "simple_protocol". See
+// QueryExecMode constant documentation for the meaning of these values. Default: "cache_statement".
+//
+// - statement_cache_capacity.
+// The maximum size of the statement cache used when executing a query with "cache_statement" query exec mode.
+// Default: 512.
+//
+// - description_cache_capacity.
+// The maximum size of the description cache used when executing a query with "cache_describe" query exec mode.
+// Default: 512.
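+//
+// For example, a connection string sketch using these options (credentials, host, and database are placeholders):
+//
+//	postgres://user:secret@localhost:5432/mydb?statement_cache_capacity=256&default_query_exec_mode=cache_describe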
+func ParseConfig(connString string) (*ConnConfig, error) {
+ return ParseConfigWithOptions(connString, ParseConfigOptions{})
+}
+
+// connect connects to a database. connect takes ownership of config. The caller must not use or access it again.
+func connect(ctx context.Context, config *ConnConfig) (c *Conn, err error) {
+ if connectTracer, ok := config.Tracer.(ConnectTracer); ok {
+ ctx = connectTracer.TraceConnectStart(ctx, TraceConnectStartData{ConnConfig: config})
+ defer func() {
+ connectTracer.TraceConnectEnd(ctx, TraceConnectEndData{Conn: c, Err: err})
+ }()
+ }
+
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ c = &Conn{
+ config: config,
+ typeMap: pgtype.NewMap(),
+ queryTracer: config.Tracer,
+ }
+
+ if t, ok := c.queryTracer.(BatchTracer); ok {
+ c.batchTracer = t
+ }
+ if t, ok := c.queryTracer.(CopyFromTracer); ok {
+ c.copyFromTracer = t
+ }
+ if t, ok := c.queryTracer.(PrepareTracer); ok {
+ c.prepareTracer = t
+ }
+
+ // Only install pgx notification system if no other callback handler is present.
+ if config.Config.OnNotification == nil {
+ config.Config.OnNotification = c.bufferNotifications
+ }
+
+ c.pgConn, err = pgconn.ConnectConfig(ctx, &config.Config)
+ if err != nil {
+ return nil, err
+ }
+
+ c.preparedStatements = make(map[string]*pgconn.StatementDescription)
+ c.doneChan = make(chan struct{})
+ c.closedChan = make(chan error)
+ c.wbuf = make([]byte, 0, 1024)
+
+ if c.config.StatementCacheCapacity > 0 {
+ c.statementCache = stmtcache.NewLRUCache(c.config.StatementCacheCapacity)
+ }
+
+ if c.config.DescriptionCacheCapacity > 0 {
+ c.descriptionCache = stmtcache.NewLRUCache(c.config.DescriptionCacheCapacity)
+ }
+
+ return c, nil
+}
+
+// Close closes a connection. It is safe to call Close on an already closed
+// connection.
+func (c *Conn) Close(ctx context.Context) error {
+ if c.IsClosed() {
+ return nil
+ }
+
+ err := c.pgConn.Close(ctx)
+ return err
+}
+
+// Prepare creates a prepared statement with name and sql. sql can contain placeholders for bound parameters. These
+// placeholders are referenced positionally as $1, $2, etc. name can be used instead of sql with Query, QueryRow, and
+// Exec to execute the statement. It can also be used with Batch.Queue.
+//
+// The underlying PostgreSQL identifier for the prepared statement will be name if name != sql or a digest of sql if
+// name == sql.
+//
+// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same name and sql arguments. This
+// allows a code path to Prepare and Query/Exec without concern for whether the statement has already been prepared.
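+//
+// A minimal sketch ("get_widget" and the SQL are illustrative):
+//
+//	_, err := conn.Prepare(ctx, "get_widget", "select name from widgets where id=$1")
+//	if err != nil {
+//		return err
+//	}
+//	var name string
+//	err = conn.QueryRow(ctx, "get_widget", 42).Scan(&name)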
+func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error) {
+ if c.prepareTracer != nil {
+ ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sql})
+ }
+
+ if name != "" {
+ var ok bool
+ if sd, ok = c.preparedStatements[name]; ok && sd.SQL == sql {
+ if c.prepareTracer != nil {
+ c.prepareTracer.TracePrepareEnd(ctx, c, TracePrepareEndData{AlreadyPrepared: true})
+ }
+ return sd, nil
+ }
+ }
+
+ if c.prepareTracer != nil {
+ defer func() {
+ c.prepareTracer.TracePrepareEnd(ctx, c, TracePrepareEndData{Err: err})
+ }()
+ }
+
+ var psName, psKey string
+ if name == sql {
+ digest := sha256.Sum256([]byte(sql))
+ psName = "stmt_" + hex.EncodeToString(digest[0:24])
+ psKey = sql
+ } else {
+ psName = name
+ psKey = name
+ }
+
+ sd, err = c.pgConn.Prepare(ctx, psName, sql, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if psKey != "" {
+ c.preparedStatements[psKey] = sd
+ }
+
+ return sd, nil
+}
+
+// Deallocate releases a prepared statement. Calling Deallocate on a non-existent prepared statement will succeed.
+func (c *Conn) Deallocate(ctx context.Context, name string) error {
+ var psName string
+ sd := c.preparedStatements[name]
+ if sd != nil {
+ psName = sd.Name
+ } else {
+ psName = name
+ }
+
+ err := c.pgConn.Deallocate(ctx, psName)
+ if err != nil {
+ return err
+ }
+
+ if sd != nil {
+ delete(c.preparedStatements, name)
+ }
+
+ return nil
+}
+
+// DeallocateAll releases all previously prepared statements from the server and client. It also resets the statement
+// and description caches.
+func (c *Conn) DeallocateAll(ctx context.Context) error {
+ c.preparedStatements = map[string]*pgconn.StatementDescription{}
+ if c.config.StatementCacheCapacity > 0 {
+ c.statementCache = stmtcache.NewLRUCache(c.config.StatementCacheCapacity)
+ }
+ if c.config.DescriptionCacheCapacity > 0 {
+ c.descriptionCache = stmtcache.NewLRUCache(c.config.DescriptionCacheCapacity)
+ }
+ _, err := c.pgConn.Exec(ctx, "deallocate all").ReadAll()
+ return err
+}
+
+func (c *Conn) bufferNotifications(_ *pgconn.PgConn, n *pgconn.Notification) {
+ c.notifications = append(c.notifications, n)
+}
+
+// WaitForNotification waits for a PostgreSQL notification. It wraps the underlying pgconn notification system in a
+// slightly more convenient form.
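+//
+// A minimal sketch ("mychannel" is illustrative); the channel must first be subscribed to with LISTEN:
+//
+//	_, err := conn.Exec(ctx, "listen mychannel")
+//	if err != nil {
+//		return err
+//	}
+//	n, err := conn.WaitForNotification(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(n.Channel, n.Payload)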
+func (c *Conn) WaitForNotification(ctx context.Context) (*pgconn.Notification, error) {
+ var n *pgconn.Notification
+
+ // Return already received notification immediately
+ if len(c.notifications) > 0 {
+ n = c.notifications[0]
+ c.notifications = c.notifications[1:]
+ return n, nil
+ }
+
+ err := c.pgConn.WaitForNotification(ctx)
+ if len(c.notifications) > 0 {
+ n = c.notifications[0]
+ c.notifications = c.notifications[1:]
+ }
+ return n, err
+}
+
+// IsClosed reports whether the connection has been closed.
+func (c *Conn) IsClosed() bool {
+ return c.pgConn.IsClosed()
+}
+
+func (c *Conn) die(err error) {
+ if c.IsClosed() {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // force immediate hard cancel
+ c.pgConn.Close(ctx)
+}
+
+func quoteIdentifier(s string) string {
+ return `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
+}
+
+// Ping delegates to the underlying *pgconn.PgConn.Ping.
+func (c *Conn) Ping(ctx context.Context) error {
+ return c.pgConn.Ping(ctx)
+}
+
+// PgConn returns the underlying *pgconn.PgConn. This is an escape hatch method that allows lower level access to the
+// PostgreSQL connection than pgx exposes.
+//
+// It is strongly recommended that the connection be idle (no in-progress queries) before the underlying *pgconn.PgConn
+// is used and the connection must be returned to the same state before any *pgx.Conn methods are again used.
+func (c *Conn) PgConn() *pgconn.PgConn { return c.pgConn }
+
+// TypeMap returns the type map used by this connection.
+func (c *Conn) TypeMap() *pgtype.Map { return c.typeMap }
+
+// Config returns a copy of config that was used to establish this connection.
+func (c *Conn) Config() *ConnConfig { return c.config.Copy() }
+
+// Exec executes sql. sql can be either a prepared statement name or an SQL string. arguments should be referenced
+// positionally from the sql string as $1, $2, etc.
+func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ if c.queryTracer != nil {
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: arguments})
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ commandTag, err := c.exec(ctx, sql, arguments...)
+
+ if c.queryTracer != nil {
+ c.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{CommandTag: commandTag, Err: err})
+ }
+
+ return commandTag, err
+}
+
+func (c *Conn) exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ mode := c.config.DefaultQueryExecMode
+ var queryRewriter QueryRewriter
+
+optionLoop:
+ for len(arguments) > 0 {
+ switch arg := arguments[0].(type) {
+ case QueryExecMode:
+ mode = arg
+ arguments = arguments[1:]
+ case QueryRewriter:
+ queryRewriter = arg
+ arguments = arguments[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, fmt.Errorf("rewrite query failed: %w", err)
+ }
+ }
+
+ // Always use simple protocol when there are no arguments.
+ if len(arguments) == 0 {
+ mode = QueryExecModeSimpleProtocol
+ }
+
+ if sd, ok := c.preparedStatements[sql]; ok {
+ return c.execPrepared(ctx, sd, arguments)
+ }
+
+ switch mode {
+ case QueryExecModeCacheStatement:
+ if c.statementCache == nil {
+ return pgconn.CommandTag{}, errDisabledStatementCache
+ }
+ sd := c.statementCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ c.statementCache.Put(sd)
+ }
+
+ return c.execPrepared(ctx, sd, arguments)
+ case QueryExecModeCacheDescribe:
+ if c.descriptionCache == nil {
+ return pgconn.CommandTag{}, errDisabledDescriptionCache
+ }
+ sd := c.descriptionCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, "", sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ c.descriptionCache.Put(sd)
+ }
+
+ return c.execParams(ctx, sd, arguments)
+ case QueryExecModeDescribeExec:
+ sd, err := c.Prepare(ctx, "", sql)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ return c.execPrepared(ctx, sd, arguments)
+ case QueryExecModeExec:
+ return c.execSQLParams(ctx, sql, arguments)
+ case QueryExecModeSimpleProtocol:
+ return c.execSimpleProtocol(ctx, sql, arguments)
+ default:
+ return pgconn.CommandTag{}, fmt.Errorf("unknown QueryExecMode: %v", mode)
+ }
+}
+
+func (c *Conn) execSimpleProtocol(ctx context.Context, sql string, arguments []any) (commandTag pgconn.CommandTag, err error) {
+ if len(arguments) > 0 {
+ sql, err = c.sanitizeForSimpleQuery(sql, arguments...)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ }
+
+ mrr := c.pgConn.Exec(ctx, sql)
+ for mrr.NextResult() {
+ commandTag, _ = mrr.ResultReader().Close()
+ }
+ err = mrr.Close()
+ return commandTag, err
+}
+
+func (c *Conn) execParams(ctx context.Context, sd *pgconn.StatementDescription, arguments []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, sd, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecParams(ctx, sd.SQL, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+func (c *Conn) execPrepared(ctx context.Context, sd *pgconn.StatementDescription, arguments []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, sd, arguments)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+type unknownArgumentTypeQueryExecModeExecError struct {
+ arg any
+}
+
+func (e *unknownArgumentTypeQueryExecModeExecError) Error() string {
+ return fmt.Sprintf("cannot use unregistered type %T as query argument in QueryExecModeExec", e.arg)
+}
+
+func (c *Conn) execSQLParams(ctx context.Context, sql string, args []any) (pgconn.CommandTag, error) {
+ err := c.eqb.Build(c.typeMap, nil, args)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ result := c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+ return result.CommandTag, result.Err
+}
+
+func (c *Conn) getRows(ctx context.Context, sql string, args []any) *baseRows {
+ r := &baseRows{}
+
+ r.ctx = ctx
+ r.queryTracer = c.queryTracer
+ r.typeMap = c.typeMap
+ r.startTime = time.Now()
+ r.sql = sql
+ r.args = args
+ r.conn = c
+
+ return r
+}
+
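+// QueryExecMode controls how a query is executed. The connection-wide default is set with
+// ConnConfig.DefaultQueryExecMode; it can be overridden per query by passing a QueryExecMode as the first query
+// argument. A sketch of a per-query override (SQL and argument are illustrative):
+//
+//	rows, err := conn.Query(ctx, "select name from widgets where id=$1", pgx.QueryExecModeSimpleProtocol, 42)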
+type QueryExecMode int32
+
+const (
+ _ QueryExecMode = iota
+
+ // Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single round
+ // trip after the statement is cached. This is the default. If the database schema is modified or the search_path is
+	// changed after a statement is cached then the first execution of a previously cached query may fail, e.g. if the
+	// number of columns returned by a "SELECT *" changes or the type of a column is changed.
+ QueryExecModeCacheStatement
+
+ // Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the extended
+ // protocol. Queries are executed in a single round trip after the description is cached. If the database schema is
+ // modified or the search_path is changed after a statement is cached then the first execution of a previously cached
+	// query may fail, e.g. if the number of columns returned by a "SELECT *" changes or the type of a column is changed.
+ QueryExecModeCacheDescribe
+
+ // Get the statement description on every execution. This uses the extended protocol. Queries require two round trips
+ // to execute. It does not use named prepared statements. But it does use the unnamed prepared statement to get the
+ // statement description on the first round trip and then uses it to execute the query on the second round trip. This
+ // may cause problems with connection poolers that switch the underlying connection between round trips. It is safe
+ // even when the database schema is modified concurrently.
+ QueryExecModeDescribeExec
+
+ // Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
+ // with text formatted parameters and results. Queries are executed in a single round trip. Type mappings can be
+	// registered with pgtype.Map.RegisterDefaultPgType. Queries that have unregistered or ambiguous arguments will be
+	// rejected, e.g. a map[string]string may have the PostgreSQL type json or hstore. Modes that know the PostgreSQL
+	// type can use a map[string]string directly as an argument. This mode cannot.
+ QueryExecModeExec
+
+ // Use the simple protocol. Assume the PostgreSQL query parameter types based on the Go type of the arguments.
+ // Queries are executed in a single round trip. Type mappings can be registered with
+	// pgtype.Map.RegisterDefaultPgType. Queries that have unregistered or ambiguous arguments will be rejected,
+	// e.g. a map[string]string may have the PostgreSQL type json or hstore. Modes that know the PostgreSQL type can use
+	// a map[string]string directly as an argument. This mode cannot.
+ //
+	// QueryExecModeSimpleProtocol should have the same user-visible behavior as QueryExecModeExec with minor
+ // exceptions such as behavior when multiple result returning queries are erroneously sent in a single string.
+ //
+ // QueryExecModeSimpleProtocol uses client side parameter interpolation. All values are quoted and escaped. Prefer
+ // QueryExecModeExec over QueryExecModeSimpleProtocol whenever possible. In general QueryExecModeSimpleProtocol
+ // should only be used if connecting to a proxy server, connection pool server, or non-PostgreSQL server that does
+ // not support the extended protocol.
+ QueryExecModeSimpleProtocol
+)
+
+func (m QueryExecMode) String() string {
+ switch m {
+ case QueryExecModeCacheStatement:
+ return "cache statement"
+ case QueryExecModeCacheDescribe:
+ return "cache describe"
+ case QueryExecModeDescribeExec:
+ return "describe exec"
+ case QueryExecModeExec:
+ return "exec"
+ case QueryExecModeSimpleProtocol:
+ return "simple protocol"
+ default:
+ return "invalid"
+ }
+}
+
+// QueryResultFormats controls the result format (text=0, binary=1) of a query by result column position.
+type QueryResultFormats []int16
+
+// QueryResultFormatsByOID controls the result format (text=0, binary=1) of a query by the result column OID.
+type QueryResultFormatsByOID map[uint32]int16
+
+// QueryRewriter rewrites a query when used as the first arguments to a query method.
+type QueryRewriter interface {
+ RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error)
+}
+
+// Query sends a query to the server and returns a Rows to read the results. Only errors encountered sending the query
+// and initializing Rows will be returned. Err() on the returned Rows must be checked after the Rows is closed to
+// determine if the query executed successfully.
+//
+// The returned Rows must be closed before the connection can be used again. It is safe to attempt to read from the
+// returned Rows even if an error is returned. The error will be available in rows.Err() after rows are closed. It
+// is allowed to ignore the error returned from Query and handle it in Rows.
+//
+// It is possible for a call of FieldDescriptions on the returned Rows to return nil even if the Query call did not
+// return an error.
+//
+// It is possible for a query to return one or more rows before encountering an error. In most cases the rows should be
+// collected before processing rather than processed while receiving each row. This avoids the possibility of the
+// application processing rows from a query that the server rejected. The CollectRows function is useful here.
+//
+// An implementor of QueryRewriter may be passed as the first element of args. It can rewrite the sql and change or
+// replace args. For example, NamedArgs is a QueryRewriter that implements named arguments.
+//
+// For extra control over how the query is executed, the types QueryExecMode, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
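+//
+// A minimal sketch using CollectRows (the SQL is illustrative):
+//
+//	rows, _ := conn.Query(ctx, "select id from widgets")
+//	ids, err := pgx.CollectRows(rows, pgx.RowTo[int32])
+//	if err != nil {
+//		return err
+//	}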
+func (c *Conn) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if c.queryTracer != nil {
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: args})
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ if c.queryTracer != nil {
+ c.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{Err: err})
+ }
+ return &baseRows{err: err, closed: true}, err
+ }
+
+ var resultFormats QueryResultFormats
+ var resultFormatsByOID QueryResultFormatsByOID
+ mode := c.config.DefaultQueryExecMode
+ var queryRewriter QueryRewriter
+
+optionLoop:
+ for len(args) > 0 {
+ switch arg := args[0].(type) {
+ case QueryResultFormats:
+ resultFormats = arg
+ args = args[1:]
+ case QueryResultFormatsByOID:
+ resultFormatsByOID = arg
+ args = args[1:]
+ case QueryExecMode:
+ mode = arg
+ args = args[1:]
+ case QueryRewriter:
+ queryRewriter = arg
+ args = args[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ var err error
+ originalSQL := sql
+ originalArgs := args
+ sql, args, err = queryRewriter.RewriteQuery(ctx, c, sql, args)
+ if err != nil {
+ rows := c.getRows(ctx, originalSQL, originalArgs)
+ err = fmt.Errorf("rewrite query failed: %w", err)
+ rows.fatal(err)
+ return rows, err
+ }
+ }
+
+ // Bypass any statement caching.
+ if sql == "" {
+ mode = QueryExecModeSimpleProtocol
+ }
+
+ c.eqb.reset()
+ rows := c.getRows(ctx, sql, args)
+
+ var err error
+ sd, explicitPreparedStatement := c.preparedStatements[sql]
+ if sd != nil || mode == QueryExecModeCacheStatement || mode == QueryExecModeCacheDescribe || mode == QueryExecModeDescribeExec {
+ if sd == nil {
+ sd, err = c.getStatementDescription(ctx, mode, sql)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+ }
+
+ if len(sd.ParamOIDs) != len(args) {
+ rows.fatal(fmt.Errorf("expected %d arguments, got %d", len(sd.ParamOIDs), len(args)))
+ return rows, rows.err
+ }
+
+ rows.sql = sd.SQL
+
+ err = c.eqb.Build(c.typeMap, sd, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ if resultFormatsByOID != nil {
+ resultFormats = make([]int16, len(sd.Fields))
+ for i := range resultFormats {
+ resultFormats[i] = resultFormatsByOID[uint32(sd.Fields[i].DataTypeOID)]
+ }
+ }
+
+ if resultFormats == nil {
+ resultFormats = c.eqb.ResultFormats
+ }
+
+ if !explicitPreparedStatement && mode == QueryExecModeCacheDescribe {
+ rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, resultFormats)
+ } else {
+ rows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, resultFormats)
+ }
+ } else if mode == QueryExecModeExec {
+ err := c.eqb.Build(c.typeMap, nil, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else if mode == QueryExecModeSimpleProtocol {
+ sql, err = c.sanitizeForSimpleQuery(sql, args...)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ mrr := c.pgConn.Exec(ctx, sql)
+ if mrr.NextResult() {
+ rows.resultReader = mrr.ResultReader()
+ rows.multiResultReader = mrr
+ } else {
+ err = mrr.Close()
+ rows.fatal(err)
+ return rows, err
+ }
+
+ return rows, nil
+ } else {
+ err = fmt.Errorf("unknown QueryExecMode: %v", mode)
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+
+ return rows, rows.err
+}
+
+// getStatementDescription returns the statement description of the sql query
+// according to the given mode.
+//
+// If the mode is one that doesn't require knowing the param and result OIDs
+// then nil is returned without error.
+func (c *Conn) getStatementDescription(
+ ctx context.Context,
+ mode QueryExecMode,
+ sql string,
+) (sd *pgconn.StatementDescription, err error) {
+ switch mode {
+ case QueryExecModeCacheStatement:
+ if c.statementCache == nil {
+ return nil, errDisabledStatementCache
+ }
+ sd = c.statementCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ if err != nil {
+ return nil, err
+ }
+ c.statementCache.Put(sd)
+ }
+ case QueryExecModeCacheDescribe:
+ if c.descriptionCache == nil {
+ return nil, errDisabledDescriptionCache
+ }
+ sd = c.descriptionCache.Get(sql)
+ if sd == nil {
+ sd, err = c.Prepare(ctx, "", sql)
+ if err != nil {
+ return nil, err
+ }
+ c.descriptionCache.Put(sd)
+ }
+ case QueryExecModeDescribeExec:
+ return c.Prepare(ctx, "", sql)
+ }
+ return sd, err
+}
+
+// QueryRow is a convenience wrapper over Query. Any error that occurs while
+// querying is deferred until calling Scan on the returned Row. That Row will
+// error with ErrNoRows if no rows are returned.
+func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := c.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// SendBatch sends all queued queries to the server at once. All queries are run in an implicit transaction unless
+// explicit transaction control statements are executed. The returned BatchResults must be closed before the connection
+// is used again.
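+//
+// A minimal sketch (the queued queries are illustrative):
+//
+//	b := &pgx.Batch{}
+//	b.Queue("insert into widgets(name) values($1)", "sprocket")
+//	b.Queue("select count(*) from widgets")
+//	br := conn.SendBatch(ctx, b)
+//	defer br.Close()
+//	if _, err := br.Exec(); err != nil {
+//		return err
+//	}
+//	var n int64
+//	if err := br.QueryRow().Scan(&n); err != nil {
+//		return err
+//	}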
+func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
+ if c.batchTracer != nil {
+ ctx = c.batchTracer.TraceBatchStart(ctx, c, TraceBatchStartData{Batch: b})
+ defer func() {
+ err := br.(interface{ earlyError() error }).earlyError()
+ if err != nil {
+ c.batchTracer.TraceBatchEnd(ctx, c, TraceBatchEndData{Err: err})
+ }
+ }()
+ }
+
+ if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+
+ for _, bi := range b.QueuedQueries {
+ var queryRewriter QueryRewriter
+ sql := bi.SQL
+ arguments := bi.Arguments
+
+ optionLoop:
+ for len(arguments) > 0 {
+ // Update Batch.Queue function comment when additional options are implemented
+ switch arg := arguments[0].(type) {
+ case QueryRewriter:
+ queryRewriter = arg
+ arguments = arguments[1:]
+ default:
+ break optionLoop
+ }
+ }
+
+ if queryRewriter != nil {
+ var err error
+ sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("rewrite query failed: %w", err)}
+ }
+ }
+
+ bi.SQL = sql
+ bi.Arguments = arguments
+ }
+
+ // TODO: changing mode per batch? Update Batch.Queue function comment when implemented
+ mode := c.config.DefaultQueryExecMode
+ if mode == QueryExecModeSimpleProtocol {
+ return c.sendBatchQueryExecModeSimpleProtocol(ctx, b)
+ }
+
+ // All other modes use extended protocol and thus can use prepared statements.
+ for _, bi := range b.QueuedQueries {
+ if sd, ok := c.preparedStatements[bi.SQL]; ok {
+ bi.sd = sd
+ }
+ }
+
+ switch mode {
+ case QueryExecModeExec:
+ return c.sendBatchQueryExecModeExec(ctx, b)
+ case QueryExecModeCacheStatement:
+ return c.sendBatchQueryExecModeCacheStatement(ctx, b)
+ case QueryExecModeCacheDescribe:
+ return c.sendBatchQueryExecModeCacheDescribe(ctx, b)
+ case QueryExecModeDescribeExec:
+ return c.sendBatchQueryExecModeDescribeExec(ctx, b)
+ default:
+ panic("unknown QueryExecMode")
+ }
+}
+
+func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batch) *batchResults {
+ var sb strings.Builder
+ for i, bi := range b.QueuedQueries {
+ if i > 0 {
+ sb.WriteByte(';')
+ }
+ sql, err := c.sanitizeForSimpleQuery(bi.SQL, bi.Arguments...)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+ sb.WriteString(sql)
+ }
+ mrr := c.pgConn.Exec(ctx, sb.String())
+ return &batchResults{
+ ctx: ctx,
+ conn: c,
+ mrr: mrr,
+ b: b,
+ qqIdx: 0,
+ }
+}
+
+func (c *Conn) sendBatchQueryExecModeExec(ctx context.Context, b *Batch) *batchResults {
+ batch := &pgconn.Batch{}
+
+ for _, bi := range b.QueuedQueries {
+ sd := bi.sd
+ if sd != nil {
+ err := c.eqb.Build(c.typeMap, sd, bi.Arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+
+ batch.ExecPrepared(sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else {
+ err := c.eqb.Build(c.typeMap, nil, bi.Arguments)
+ if err != nil {
+ return &batchResults{ctx: ctx, conn: c, err: err}
+ }
+ batch.ExecParams(bi.SQL, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ }
+ }
+
+ c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
+
+ mrr := c.pgConn.ExecBatch(ctx, batch)
+
+ return &batchResults{
+ ctx: ctx,
+ conn: c,
+ mrr: mrr,
+ b: b,
+ qqIdx: 0,
+ }
+}
+
+func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ if c.statementCache == nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledStatementCache, closed: true}
+ }
+
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ sd := c.statementCache.Get(bi.SQL)
+ if sd != nil {
+ bi.sd = sd
+ } else {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd = &pgconn.StatementDescription{
+ Name: stmtcache.StatementName(bi.SQL),
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, c.statementCache)
+}
+
+func (c *Conn) sendBatchQueryExecModeCacheDescribe(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ if c.descriptionCache == nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledDescriptionCache, closed: true}
+ }
+
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ sd := c.descriptionCache.Get(bi.SQL)
+ if sd != nil {
+ bi.sd = sd
+ } else {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd = &pgconn.StatementDescription{
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, c.descriptionCache)
+}
+
+func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
+ distinctNewQueries := []*pgconn.StatementDescription{}
+ distinctNewQueriesIdxMap := make(map[string]int)
+
+ for _, bi := range b.QueuedQueries {
+ if bi.sd == nil {
+ if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
+ bi.sd = distinctNewQueries[idx]
+ } else {
+ sd := &pgconn.StatementDescription{
+ SQL: bi.SQL,
+ }
+ distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
+ distinctNewQueries = append(distinctNewQueries, sd)
+ bi.sd = sd
+ }
+ }
+ }
+
+ return c.sendBatchExtendedWithDescription(ctx, b, distinctNewQueries, nil)
+}
+
+func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, distinctNewQueries []*pgconn.StatementDescription, sdCache stmtcache.Cache) (pbr *pipelineBatchResults) {
+ pipeline := c.pgConn.StartPipeline(ctx)
+ defer func() {
+ if pbr != nil && pbr.err != nil {
+ pipeline.Close()
+ }
+ }()
+
+ // Prepare any needed queries
+ if len(distinctNewQueries) > 0 {
+ for _, sd := range distinctNewQueries {
+ pipeline.SendPrepare(sd.Name, sd.SQL, nil)
+ }
+
+ err := pipeline.Sync()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ for _, sd := range distinctNewQueries {
+ results, err := pipeline.GetResults()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ resultSD, ok := results.(*pgconn.StatementDescription)
+ if !ok {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected statement description, got %T", results), closed: true}
+ }
+
+ // Fill in the previously empty / pending statement descriptions.
+ sd.ParamOIDs = resultSD.ParamOIDs
+ sd.Fields = resultSD.Fields
+ }
+
+ results, err := pipeline.GetResults()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ _, ok := results.(*pgconn.PipelineSync)
+ if !ok {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected sync, got %T", results), closed: true}
+ }
+ }
+
+	// Put all statements into the cache. It's fine if it overflows because RemoveInvalidated will clean them up later.
+ if sdCache != nil {
+ for _, sd := range distinctNewQueries {
+ sdCache.Put(sd)
+ }
+ }
+
+ // Queue the queries.
+ for _, bi := range b.QueuedQueries {
+ err := c.eqb.Build(c.typeMap, bi.sd, bi.Arguments)
+ if err != nil {
+			// Wrap the error so the user can understand which query failed inside the batch.
+ err = fmt.Errorf("error building query %s: %w", bi.SQL, err)
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ if bi.sd.Name == "" {
+ pipeline.SendQueryParams(bi.sd.SQL, c.eqb.ParamValues, bi.sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ } else {
+ pipeline.SendQueryPrepared(bi.sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ }
+ }
+
+ err := pipeline.Sync()
+ if err != nil {
+ return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
+ }
+
+ return &pipelineBatchResults{
+ ctx: ctx,
+ conn: c,
+ pipeline: pipeline,
+ b: b,
+ }
+}
+
+func (c *Conn) sanitizeForSimpleQuery(sql string, args ...any) (string, error) {
+ if c.pgConn.ParameterStatus("standard_conforming_strings") != "on" {
+ return "", errors.New("simple protocol queries must be run with standard_conforming_strings=on")
+ }
+
+ if c.pgConn.ParameterStatus("client_encoding") != "UTF8" {
+ return "", errors.New("simple protocol queries must be run with client_encoding=UTF8")
+ }
+
+ var err error
+ valueArgs := make([]any, len(args))
+ for i, a := range args {
+ valueArgs[i], err = convertSimpleArgument(c.typeMap, a)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return sanitize.SanitizeSQL(sql, valueArgs...)
+}
+
+// LoadType inspects the database for typeName and produces a pgtype.Type suitable for registration. typeName must be
+// the name of a type where the underlying type(s) is already understood by pgx. It is for derived types. In particular,
+// typeName must be one of the following:
+// - An array type name of a type that is already registered. e.g. "_foo" when "foo" is registered.
+// - A composite type name where all field types are already registered.
+// - A domain type name where the base type is already registered.
+// - An enum type name.
+// - A range type name where the element type is already registered.
+// - A multirange type name where the element type is already registered.
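+//
+// A minimal sketch registering a hypothetical enum type "mood":
+//
+//	t, err := conn.LoadType(ctx, "mood")
+//	if err != nil {
+//		return err
+//	}
+//	conn.TypeMap().RegisterType(t)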
+func (c *Conn) LoadType(ctx context.Context, typeName string) (*pgtype.Type, error) {
+ var oid uint32
+
+ err := c.QueryRow(ctx, "select $1::text::regtype::oid;", typeName).Scan(&oid)
+ if err != nil {
+ return nil, err
+ }
+
+ var typtype string
+ var typbasetype uint32
+
+ err = c.QueryRow(ctx, "select typtype::text, typbasetype from pg_type where oid=$1", oid).Scan(&typtype, &typbasetype)
+ if err != nil {
+ return nil, err
+ }
+
+ switch typtype {
+ case "b": // array
+ elementOID, err := c.getArrayElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("array element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}, nil
+ case "c": // composite
+ fields, err := c.getCompositeFields(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.CompositeCodec{Fields: fields}}, nil
+ case "d": // domain
+ dt, ok := c.TypeMap().TypeForOID(typbasetype)
+ if !ok {
+ return nil, errors.New("domain base type OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: dt.Codec}, nil
+ case "e": // enum
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.EnumCodec{}}, nil
+ case "r": // range
+ elementOID, err := c.getRangeElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("range element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.RangeCodec{ElementType: dt}}, nil
+ case "m": // multirange
+ elementOID, err := c.getMultiRangeElementOID(ctx, oid)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := c.TypeMap().TypeForOID(elementOID)
+ if !ok {
+ return nil, errors.New("multirange element OID not registered")
+ }
+
+ return &pgtype.Type{Name: typeName, OID: oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}, nil
+ default:
+ return &pgtype.Type{}, errors.New("unknown typtype")
+ }
+}
+
+func (c *Conn) getArrayElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select typelem from pg_type where oid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getRangeElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select rngsubtype from pg_range where rngtypid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getMultiRangeElementOID(ctx context.Context, oid uint32) (uint32, error) {
+ var typelem uint32
+
+ err := c.QueryRow(ctx, "select rngtypid from pg_range where rngmultitypid=$1", oid).Scan(&typelem)
+ if err != nil {
+ return 0, err
+ }
+
+ return typelem, nil
+}
+
+func (c *Conn) getCompositeFields(ctx context.Context, oid uint32) ([]pgtype.CompositeCodecField, error) {
+ var typrelid uint32
+
+ err := c.QueryRow(ctx, "select typrelid from pg_type where oid=$1", oid).Scan(&typrelid)
+ if err != nil {
+ return nil, err
+ }
+
+ var fields []pgtype.CompositeCodecField
+ var fieldName string
+ var fieldOID uint32
+ rows, _ := c.Query(ctx, `select attname, atttypid
+from pg_attribute
+where attrelid=$1
+ and not attisdropped
+ and attnum > 0
+order by attnum`,
+ typrelid,
+ )
+ _, err = ForEachRow(rows, []any{&fieldName, &fieldOID}, func() error {
+ dt, ok := c.TypeMap().TypeForOID(fieldOID)
+ if !ok {
+ return fmt.Errorf("unknown composite type field OID: %v", fieldOID)
+ }
+ fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return fields, nil
+}
+
+func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error {
+ if txStatus := c.pgConn.TxStatus(); txStatus != 'I' && txStatus != 'T' {
+ return nil
+ }
+
+ if c.descriptionCache != nil {
+ c.descriptionCache.RemoveInvalidated()
+ }
+
+ var invalidatedStatements []*pgconn.StatementDescription
+ if c.statementCache != nil {
+ invalidatedStatements = c.statementCache.GetInvalidated()
+ }
+
+ if len(invalidatedStatements) == 0 {
+ return nil
+ }
+
+ pipeline := c.pgConn.StartPipeline(ctx)
+ defer pipeline.Close()
+
+ for _, sd := range invalidatedStatements {
+ pipeline.SendDeallocate(sd.Name)
+ }
+
+ err := pipeline.Sync()
+ if err != nil {
+ return fmt.Errorf("failed to deallocate cached statement(s): %w", err)
+ }
+
+ err = pipeline.Close()
+ if err != nil {
+ return fmt.Errorf("failed to deallocate cached statement(s): %w", err)
+ }
+
+ c.statementCache.RemoveInvalidated()
+ for _, sd := range invalidatedStatements {
+ delete(c.preparedStatements, sd.Name)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/copy_from.go b/vendor/github.com/jackc/pgx/v5/copy_from.go
new file mode 100644
index 0000000..abcd223
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/copy_from.go
@@ -0,0 +1,276 @@
+package pgx
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// CopyFromRows returns a CopyFromSource interface over the provided rows slice,
+// making it usable by *Conn.CopyFrom.
+func CopyFromRows(rows [][]any) CopyFromSource {
+ return &copyFromRows{rows: rows, idx: -1}
+}
+
+type copyFromRows struct {
+ rows [][]any
+ idx int
+}
+
+func (ctr *copyFromRows) Next() bool {
+ ctr.idx++
+ return ctr.idx < len(ctr.rows)
+}
+
+func (ctr *copyFromRows) Values() ([]any, error) {
+ return ctr.rows[ctr.idx], nil
+}
+
+func (ctr *copyFromRows) Err() error {
+ return nil
+}
+
+// CopyFromSlice returns a CopyFromSource interface over a dynamic func,
+// making it usable by *Conn.CopyFrom.
+func CopyFromSlice(length int, next func(int) ([]any, error)) CopyFromSource {
+ return &copyFromSlice{next: next, idx: -1, len: length}
+}
+
+type copyFromSlice struct {
+ next func(int) ([]any, error)
+ idx int
+ len int
+ err error
+}
+
+func (cts *copyFromSlice) Next() bool {
+ cts.idx++
+ return cts.idx < cts.len
+}
+
+func (cts *copyFromSlice) Values() ([]any, error) {
+ values, err := cts.next(cts.idx)
+ if err != nil {
+ cts.err = err
+ }
+ return values, err
+}
+
+func (cts *copyFromSlice) Err() error {
+ return cts.err
+}
+
+// CopyFromFunc returns a CopyFromSource interface that relies on nxtf for values.
+// nxtf returns rows until it either signals an 'end of data' by returning row=nil and err=nil,
+// or it returns an error. If nxtf returns an error, the copy is aborted.
+func CopyFromFunc(nxtf func() (row []any, err error)) CopyFromSource {
+ return &copyFromFunc{next: nxtf}
+}
+
+type copyFromFunc struct {
+ next func() ([]any, error)
+ valueRow []any
+ err error
+}
+
+func (g *copyFromFunc) Next() bool {
+ g.valueRow, g.err = g.next()
+ // only return true if valueRow exists and no error
+ return g.valueRow != nil && g.err == nil
+}
+
+func (g *copyFromFunc) Values() ([]any, error) {
+ return g.valueRow, g.err
+}
+
+func (g *copyFromFunc) Err() error {
+ return g.err
+}
+
+// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
+type CopyFromSource interface {
+ // Next returns true if there is another row and makes the next row data
+ // available to Values(). When there are no more rows available or an error
+ // has occurred it returns false.
+ Next() bool
+
+ // Values returns the values for the current row.
+ Values() ([]any, error)
+
+ // Err returns any error that has been encountered by the CopyFromSource. If
+ // this is not nil *Conn.CopyFrom will abort the copy.
+ Err() error
+}
+
+type copyFrom struct {
+ conn *Conn
+ tableName Identifier
+ columnNames []string
+ rowSrc CopyFromSource
+ readerErrChan chan error
+ mode QueryExecMode
+}
+
+func (ct *copyFrom) run(ctx context.Context) (int64, error) {
+ if ct.conn.copyFromTracer != nil {
+ ctx = ct.conn.copyFromTracer.TraceCopyFromStart(ctx, ct.conn, TraceCopyFromStartData{
+ TableName: ct.tableName,
+ ColumnNames: ct.columnNames,
+ })
+ }
+
+ quotedTableName := ct.tableName.Sanitize()
+ cbuf := &bytes.Buffer{}
+ for i, cn := range ct.columnNames {
+ if i != 0 {
+ cbuf.WriteString(", ")
+ }
+ cbuf.WriteString(quoteIdentifier(cn))
+ }
+ quotedColumnNames := cbuf.String()
+
+ var sd *pgconn.StatementDescription
+ switch ct.mode {
+ case QueryExecModeExec, QueryExecModeSimpleProtocol:
+ // These modes don't support the binary format. Before the inclusion of the
+ // QueryExecModes, Conn.Prepare was called on every COPY operation to get
+ // the OIDs. These prepared statements were not cached.
+ //
+ // Since that's the same behavior provided by QueryExecModeDescribeExec,
+ // we'll default to that mode.
+ ct.mode = QueryExecModeDescribeExec
+ fallthrough
+ case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
+ var err error
+ sd, err = ct.conn.getStatementDescription(
+ ctx,
+ ct.mode,
+ fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName),
+ )
+ if err != nil {
+ return 0, fmt.Errorf("statement description failed: %w", err)
+ }
+ default:
+ return 0, fmt.Errorf("unknown QueryExecMode: %v", ct.mode)
+ }
+
+ r, w := io.Pipe()
+ doneChan := make(chan struct{})
+
+ go func() {
+ defer close(doneChan)
+
+ // Purposely NOT using defer w.Close(). See https://github.com/golang/go/issues/24283.
+ buf := ct.conn.wbuf
+
+ buf = append(buf, "PGCOPY\n\377\r\n\000"...)
+ buf = pgio.AppendInt32(buf, 0)
+ buf = pgio.AppendInt32(buf, 0)
+
+ moreRows := true
+ for moreRows {
+ var err error
+ moreRows, buf, err = ct.buildCopyBuf(buf, sd)
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if ct.rowSrc.Err() != nil {
+ w.CloseWithError(ct.rowSrc.Err())
+ return
+ }
+
+ if len(buf) > 0 {
+ _, err = w.Write(buf)
+ if err != nil {
+ w.Close()
+ return
+ }
+ }
+
+ buf = buf[:0]
+ }
+
+ w.Close()
+ }()
+
+ commandTag, err := ct.conn.pgConn.CopyFrom(ctx, r, fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames))
+
+ r.Close()
+ <-doneChan
+
+ if ct.conn.copyFromTracer != nil {
+ ct.conn.copyFromTracer.TraceCopyFromEnd(ctx, ct.conn, TraceCopyFromEndData{
+ CommandTag: commandTag,
+ Err: err,
+ })
+ }
+
+ return commandTag.RowsAffected(), err
+}
+
+func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (bool, []byte, error) {
+ const sendBufSize = 65536 - 5 // The packet has a 5-byte header
+ lastBufLen := 0
+ largestRowLen := 0
+
+ for ct.rowSrc.Next() {
+ lastBufLen = len(buf)
+
+ values, err := ct.rowSrc.Values()
+ if err != nil {
+ return false, nil, err
+ }
+ if len(values) != len(ct.columnNames) {
+ return false, nil, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
+ for i, val := range values {
+ buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, val)
+ if err != nil {
+ return false, nil, err
+ }
+ }
+
+ rowLen := len(buf) - lastBufLen
+ if rowLen > largestRowLen {
+ largestRowLen = rowLen
+ }
+
+ // Try not to overflow size of the buffer PgConn.CopyFrom will be reading into. If that happens then the nature of
+		// io.Pipe means that the next Read will be short. This can lead to pathological send sizes such as 65531, 13,
+		// 65531, 13, 65531, 13, 65531, 13.
+ if len(buf) > sendBufSize-largestRowLen {
+ return true, buf, nil
+ }
+ }
+
+ return false, buf, nil
+}
+
+// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. It returns the number of rows copied and
+// an error.
+//
+// CopyFrom requires all values use the binary format. A pgtype.Type that supports the binary format must be registered
+// for the type of each column. Almost all types implemented by pgx support the binary format.
+//
+// Even though enum types appear to be strings they still must be registered to use with CopyFrom. This can be done with
+// Conn.LoadType and pgtype.Map.RegisterType.
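+//
+// A minimal sketch (table, columns, and values are illustrative):
+//
+//	rows := [][]any{{"sprocket", int64(10)}, {"gear", int64(20)}}
+//	n, err := conn.CopyFrom(ctx, pgx.Identifier{"widgets"}, []string{"name", "weight"}, pgx.CopyFromRows(rows))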
+func (c *Conn) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ ct := &copyFrom{
+ conn: c,
+ tableName: tableName,
+ columnNames: columnNames,
+ rowSrc: rowSrc,
+ readerErrChan: make(chan error),
+ mode: c.config.DefaultQueryExecMode,
+ }
+
+ return ct.run(ctx)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/doc.go b/vendor/github.com/jackc/pgx/v5/doc.go
new file mode 100644
index 0000000..bc0391d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/doc.go
@@ -0,0 +1,194 @@
+// Package pgx is a PostgreSQL database driver.
+/*
+pgx provides a native PostgreSQL driver and can act as a database/sql driver. The native PostgreSQL interface is similar
+to the database/sql interface while providing better speed and access to PostgreSQL specific features. Use
+github.com/jackc/pgx/v5/stdlib to use pgx as a database/sql compatible driver. See that package's documentation for
+details.
+
+Establishing a Connection
+
+The primary way of establishing a connection is with [pgx.Connect]:
+
+ conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+
+The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be
+specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the
+connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection
+string.
+
+Connection Pool
+
+[*pgx.Conn] represents a single connection to the database and is not concurrency safe. Use package
+github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool.
+
+Query Interface
+
+pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and
+ForEachRow that are a simpler and safer way of processing rows than manually calling rows.Next(), rows.Scan, and
+rows.Err() and deferring rows.Close().
+
+CollectRows can be used to collect all returned rows into a slice.
+
+ rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 5)
+ numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])
+ if err != nil {
+ return err
+ }
+ // numbers => [1 2 3 4 5]
+
+ForEachRow can be used to execute a callback function for every row. This is often easier than iterating over rows
+directly.
+
+ var sum, n int32
+ rows, _ := conn.Query(context.Background(), "select generate_series(1,$1)", 10)
+ _, err := pgx.ForEachRow(rows, []any{&n}, func() error {
+ sum += n
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+pgx also implements QueryRow in the same style as database/sql.
+
+ var name string
+ var weight int64
+ err := conn.QueryRow(context.Background(), "select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+ if err != nil {
+ return err
+ }
+
+Use Exec to execute a query that does not return a result set.
+
+ commandTag, err := conn.Exec(context.Background(), "delete from widgets where id=$1", 42)
+ if err != nil {
+ return err
+ }
+ if commandTag.RowsAffected() != 1 {
+ return errors.New("No row found to delete")
+ }
+
+PostgreSQL Data Types
+
+pgx uses the pgtype package to convert Go values to and from PostgreSQL values. It supports many PostgreSQL types
+directly and is customizable and extendable. User defined data types such as enums, domains, and composite types may
+require type registration. See that package's documentation for details.
+
+Transactions
+
+Transactions are started by calling Begin.
+
+ tx, err := conn.Begin(context.Background())
+ if err != nil {
+ return err
+ }
+ // Rollback is safe to call even if the tx is already closed, so if
+ // the tx commits successfully, this is a no-op
+ defer tx.Rollback(context.Background())
+
+ _, err = tx.Exec(context.Background(), "insert into foo(id) values (1)")
+ if err != nil {
+ return err
+ }
+
+ err = tx.Commit(context.Background())
+ if err != nil {
+ return err
+ }
+
+The Tx returned from Begin also implements the Begin method. This can be used to implement pseudo nested transactions.
+These are internally implemented with savepoints.
+
+Use BeginTx to control the transaction mode. BeginTx also can be used to ensure a new transaction is created instead of
+a pseudo nested transaction.
+
+BeginFunc and BeginTxFunc are functions that begin a transaction, execute a function, and commit or rollback the
+transaction depending on the return value of the function. These can be simpler and less error prone to use.
+
+ err = pgx.BeginFunc(context.Background(), conn, func(tx pgx.Tx) error {
+ _, err := tx.Exec(context.Background(), "insert into foo(id) values (1)")
+ return err
+ })
+ if err != nil {
+ return err
+ }
+
+Prepared Statements
+
+Prepared statements can be manually created with the Prepare method. However, this is rarely necessary because pgx
+includes an automatic statement cache by default. Queries run through the normal Query, QueryRow, and Exec functions are
+automatically prepared on first execution and the prepared statement is reused on subsequent executions. See ParseConfig
+for information on how to customize or disable the statement cache.
+
+Copy Protocol
+
+Use CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL copy protocol. CopyFrom accepts a
+CopyFromSource interface. If the data is already in a [][]any use CopyFromRows to wrap it in a CopyFromSource interface.
+Or implement CopyFromSource to avoid buffering the entire data set in memory.
+
+ rows := [][]any{
+ {"John", "Smith", int32(36)},
+ {"Jane", "Doe", int32(29)},
+ }
+
+ copyCount, err := conn.CopyFrom(
+ context.Background(),
+ pgx.Identifier{"people"},
+ []string{"first_name", "last_name", "age"},
+ pgx.CopyFromRows(rows),
+ )
+
+When you already have a typed array, using CopyFromSlice can be more convenient.
+
+ rows := []User{
+ {"John", "Smith", 36},
+ {"Jane", "Doe", 29},
+ }
+
+ copyCount, err := conn.CopyFrom(
+ context.Background(),
+ pgx.Identifier{"people"},
+ []string{"first_name", "last_name", "age"},
+ pgx.CopyFromSlice(len(rows), func(i int) ([]any, error) {
+ return []any{rows[i].FirstName, rows[i].LastName, rows[i].Age}, nil
+ }),
+ )
+
+CopyFrom can be faster than an insert with as few as 5 rows.
+
+Listen and Notify
+
+pgx can listen to the PostgreSQL notification system with the `Conn.WaitForNotification` method. It blocks until a
+notification is received or the context is canceled.
+
+ _, err := conn.Exec(context.Background(), "listen channelname")
+ if err != nil {
+ return err
+ }
+
+ notification, err := conn.WaitForNotification(context.Background())
+ if err != nil {
+ return err
+ }
+ // do something with notification
+
+Tracing and Logging
+
+pgx supports tracing by setting ConnConfig.Tracer.
+
+In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
+
+For debug tracing of the actual PostgreSQL wire protocol messages see github.com/jackc/pgx/v5/pgproto3.
+
+Lower Level PostgreSQL Functionality
+
+github.com/jackc/pgx/v5/pgconn contains a lower level PostgreSQL driver roughly at the level of libpq. pgx.Conn is
+implemented on top of pgconn. The Conn.PgConn() method can be used to access this lower layer.
+
+PgBouncer
+
+By default pgx automatically uses prepared statements. Prepared statements are incompatible with PgBouncer. This can be
+disabled by setting a different QueryExecMode in ConnConfig.DefaultQueryExecMode (see the example below).
+*/
+package pgx
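
Editor's note: a minimal sketch of the PgBouncer advice above, assuming pgx v5's QueryExecModeSimpleProtocol constant and the ConnConfig.DefaultQueryExecMode field named in the docs; other non-default modes also avoid server-side prepared statements to varying degrees.

```go
package example

import (
	"context"
	"os"

	"github.com/jackc/pgx/v5"
)

// connectForPgBouncer disables pgx's automatic prepared statements so the
// connection can run behind PgBouncer in transaction-pooling mode.
func connectForPgBouncer(ctx context.Context) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		return nil, err
	}
	cfg.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
	return pgx.ConnectConfig(ctx, cfg)
}
```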
diff --git a/vendor/github.com/jackc/pgx/v5/extended_query_builder.go b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go
new file mode 100644
index 0000000..526b0e9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/extended_query_builder.go
@@ -0,0 +1,146 @@
+package pgx
+
+import (
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// ExtendedQueryBuilder is used to choose the parameter formats, to format the parameters and to choose the result
+// formats for an extended query.
+type ExtendedQueryBuilder struct {
+ ParamValues [][]byte
+ paramValueBytes []byte
+ ParamFormats []int16
+ ResultFormats []int16
+}
+
+// Build sets ParamValues, ParamFormats, and ResultFormats for use with *PgConn.ExecParams or *PgConn.ExecPrepared. If
+// sd is nil then QueryExecModeExec behavior will be used.
+func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error {
+ eqb.reset()
+
+ if sd == nil {
+ for i := range args {
+ err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i])
+ if err != nil {
+ err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
+ return err
+ }
+ }
+ return nil
+ }
+
+ if len(sd.ParamOIDs) != len(args) {
+ return fmt.Errorf("mismatched param and argument count")
+ }
+
+ for i := range args {
+ err := eqb.appendParam(m, sd.ParamOIDs[i], -1, args[i])
+ if err != nil {
+ err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
+ return err
+ }
+ }
+
+ for i := range sd.Fields {
+ eqb.appendResultFormat(m.FormatCodeForOID(sd.Fields[i].DataTypeOID))
+ }
+
+ return nil
+}
+
+// appendParam appends a parameter to the query. format may be -1 to automatically choose the format. If arg is nil it
+// must be an untyped nil.
+func (eqb *ExtendedQueryBuilder) appendParam(m *pgtype.Map, oid uint32, format int16, arg any) error {
+ if format == -1 {
+ preferredFormat := eqb.chooseParameterFormatCode(m, oid, arg)
+ preferredErr := eqb.appendParam(m, oid, preferredFormat, arg)
+ if preferredErr == nil {
+ return nil
+ }
+
+ var otherFormat int16
+ if preferredFormat == TextFormatCode {
+ otherFormat = BinaryFormatCode
+ } else {
+ otherFormat = TextFormatCode
+ }
+
+ otherErr := eqb.appendParam(m, oid, otherFormat, arg)
+ if otherErr == nil {
+ return nil
+ }
+
+ return preferredErr // return the error from the preferred format
+ }
+
+ v, err := eqb.encodeExtendedParamValue(m, oid, format, arg)
+ if err != nil {
+ return err
+ }
+
+ eqb.ParamFormats = append(eqb.ParamFormats, format)
+ eqb.ParamValues = append(eqb.ParamValues, v)
+
+ return nil
+}
+
+// appendResultFormat appends a result format to the query.
+func (eqb *ExtendedQueryBuilder) appendResultFormat(format int16) {
+ eqb.ResultFormats = append(eqb.ResultFormats, format)
+}
+
+// reset readies eqb to build another query.
+func (eqb *ExtendedQueryBuilder) reset() {
+ eqb.ParamValues = eqb.ParamValues[0:0]
+ eqb.paramValueBytes = eqb.paramValueBytes[0:0]
+ eqb.ParamFormats = eqb.ParamFormats[0:0]
+ eqb.ResultFormats = eqb.ResultFormats[0:0]
+
+ if cap(eqb.ParamValues) > 64 {
+ eqb.ParamValues = make([][]byte, 0, 64)
+ }
+
+ if cap(eqb.paramValueBytes) > 256 {
+ eqb.paramValueBytes = make([]byte, 0, 256)
+ }
+
+ if cap(eqb.ParamFormats) > 64 {
+ eqb.ParamFormats = make([]int16, 0, 64)
+ }
+ if cap(eqb.ResultFormats) > 64 {
+ eqb.ResultFormats = make([]int16, 0, 64)
+ }
+}
+
+func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) {
+ if eqb.paramValueBytes == nil {
+ eqb.paramValueBytes = make([]byte, 0, 128)
+ }
+
+ pos := len(eqb.paramValueBytes)
+
+ buf, err := m.Encode(oid, formatCode, arg, eqb.paramValueBytes)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+ eqb.paramValueBytes = buf
+ return eqb.paramValueBytes[pos:], nil
+}
+
+// chooseParameterFormatCode determines the correct format code for an
+// argument to a prepared statement. It defaults to TextFormatCode if no
+// determination can be made.
+func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid uint32, arg any) int16 {
+ switch arg.(type) {
+ case string, *string:
+ return TextFormatCode
+ }
+
+ return m.FormatCodeForOID(oid)
+}
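
Editor's note: since ExtendedQueryBuilder is exported, a short sketch may help show how its outputs feed pgconn's extended-protocol entry points. It assumes sd was produced by an earlier PgConn.Prepare; passing a nil sd instead makes Build encode every argument in the text format.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgx/v5/pgtype"
)

// execPrepared shows how ExtendedQueryBuilder's outputs line up with the
// pgconn extended-protocol call. sd comes from a prior PgConn.Prepare.
func execPrepared(ctx context.Context, pgConn *pgconn.PgConn, sd *pgconn.StatementDescription, args []any) (*pgconn.ResultReader, error) {
	m := pgtype.NewMap()
	var eqb pgx.ExtendedQueryBuilder
	// Build chooses a text/binary format per parameter OID and encodes args.
	if err := eqb.Build(m, sd, args); err != nil {
		return nil, err
	}
	return pgConn.ExecPrepared(ctx, sd.Name, eqb.ParamValues, eqb.ParamFormats, eqb.ResultFormats), nil
}
```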
diff --git a/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go b/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
new file mode 100644
index 0000000..89e0c22
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/iobufpool/iobufpool.go
@@ -0,0 +1,70 @@
+// Package iobufpool implements a global segregated-fit pool of buffers for IO.
+//
+// It uses *[]byte instead of []byte to avoid the sync.Pool allocation with Put. Unfortunately, using a pointer to avoid
+// an allocation is purposely not documented. https://github.com/golang/go/issues/16323
+package iobufpool
+
+import "sync"
+
+const minPoolExpOf2 = 8
+
+var pools [18]*sync.Pool
+
+func init() {
+ for i := range pools {
+ bufLen := 1 << (minPoolExpOf2 + i)
+ pools[i] = &sync.Pool{
+ New: func() any {
+ buf := make([]byte, bufLen)
+ return &buf
+ },
+ }
+ }
+}
+
+// Get gets a *[]byte of len size with cap <= size*2.
+func Get(size int) *[]byte {
+ i := getPoolIdx(size)
+ if i >= len(pools) {
+ buf := make([]byte, size)
+ return &buf
+ }
+
+ ptrBuf := (pools[i].Get().(*[]byte))
+ *ptrBuf = (*ptrBuf)[:size]
+
+ return ptrBuf
+}
+
+func getPoolIdx(size int) int {
+ size--
+ size >>= minPoolExpOf2
+ i := 0
+ for size > 0 {
+ size >>= 1
+ i++
+ }
+
+ return i
+}
+
+// Put returns buf to the pool.
+func Put(buf *[]byte) {
+ i := putPoolIdx(cap(*buf))
+ if i < 0 {
+ return
+ }
+
+ pools[i].Put(buf)
+}
+
+func putPoolIdx(size int) int {
+ minPoolSize := 1 << minPoolExpOf2
+ for i := range pools {
+ if size == minPoolSize<<i {
+ return i
+ }
+ }
+
+ return -1
+}
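
Editor's note: a minimal sketch of the segregated-fit behavior above. iobufpool is an internal package, so this only compiles from within the pgx module; the pool sizes are powers of two from 256 bytes to 32 MiB.

```go
package iobufpool_test // hypothetical placement inside the pgx module

import "github.com/jackc/pgx/v5/internal/iobufpool"

func Example() {
	buf := iobufpool.Get(1000) // len(*buf) == 1000, served by the 1024-byte pool
	copy(*buf, "hello")
	iobufpool.Put(buf) // cap is exactly 1024, so the buffer is pooled again

	huge := iobufpool.Get(1 << 26) // bigger than the largest (32 MiB) pool: freshly allocated
	iobufpool.Put(huge)            // cap matches no pool size, so Put silently drops it
}
```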
diff --git a/vendor/github.com/jackc/pgx/v5/internal/pgio/README.md b/vendor/github.com/jackc/pgx/v5/internal/pgio/README.md
new file mode 100644
index 0000000..b2fc580
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/pgio/README.md
@@ -0,0 +1,6 @@
+# pgio
+
+Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
+
+pgio provides functions for appending integers to a []byte while doing byte
+order conversion.
diff --git a/vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go b/vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go
new file mode 100644
index 0000000..ef2dcc7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/pgio/doc.go
@@ -0,0 +1,6 @@
+// Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
+/*
+pgio provides functions for appending integers to a []byte while doing byte
+order conversion.
+*/
+package pgio
diff --git a/vendor/github.com/jackc/pgx/v5/internal/pgio/write.go b/vendor/github.com/jackc/pgx/v5/internal/pgio/write.go
new file mode 100644
index 0000000..96aedf9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/pgio/write.go
@@ -0,0 +1,40 @@
+package pgio
+
+import "encoding/binary"
+
+func AppendUint16(buf []byte, n uint16) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0)
+ binary.BigEndian.PutUint16(buf[wp:], n)
+ return buf
+}
+
+func AppendUint32(buf []byte, n uint32) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0)
+ binary.BigEndian.PutUint32(buf[wp:], n)
+ return buf
+}
+
+func AppendUint64(buf []byte, n uint64) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
+ binary.BigEndian.PutUint64(buf[wp:], n)
+ return buf
+}
+
+func AppendInt16(buf []byte, n int16) []byte {
+ return AppendUint16(buf, uint16(n))
+}
+
+func AppendInt32(buf []byte, n int32) []byte {
+ return AppendUint32(buf, uint32(n))
+}
+
+func AppendInt64(buf []byte, n int64) []byte {
+ return AppendUint64(buf, uint64(n))
+}
+
+func SetInt32(buf []byte, n int32) {
+ binary.BigEndian.PutUint32(buf, uint32(n))
+}
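
Editor's note: a small sketch of how these helpers compose into a wire message. The bytes shown are the frontend Terminate message ('X' plus a length of 4); pgio is internal, so the example only compiles inside the pgx module.

```go
package pgio_test // hypothetical placement inside the pgx module

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/pgio"
)

func Example() {
	// Build a frontend message: 1-byte type, int32 length (including itself),
	// then the payload. The length is backfilled once the payload is known.
	buf := []byte{'X'}              // message type
	lenPos := len(buf)              // remember where the length goes
	buf = pgio.AppendInt32(buf, -1) // placeholder length
	// ... append payload with the other pgio.Append* helpers here ...
	pgio.SetInt32(buf[lenPos:], int32(len(buf)-lenPos)) // backfill: 4 for Terminate

	fmt.Printf("% x\n", buf)
	// Output: 58 00 00 00 04
}
```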
diff --git a/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go b/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
new file mode 100644
index 0000000..df58c44
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go
@@ -0,0 +1,331 @@
+package sanitize
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// Part is either a string or an int. A string is raw SQL. An int is an
+// argument placeholder.
+type Part any
+
+type Query struct {
+ Parts []Part
+}
+
+// utf8.DecodeRuneInString returns utf8.RuneError for errors. But utf8.RuneError is actually the rune U+FFFD -- the
+// Unicode replacement character -- which can also appear legitimately in valid input. A legitimate U+FFFD decodes with
+// a width of 3, so utf8.RuneError only indicates a decoding error when its width is not 3.
+//
+// https://github.com/jackc/pgx/issues/1380
+const replacementcharacterwidth = 3
+
+func (q *Query) Sanitize(args ...any) (string, error) {
+ argUse := make([]bool, len(args))
+ buf := &bytes.Buffer{}
+
+ for _, part := range q.Parts {
+ var str string
+ switch part := part.(type) {
+ case string:
+ str = part
+ case int:
+ argIdx := part - 1
+
+ if argIdx < 0 {
+ return "", fmt.Errorf("first sql argument must be > 0")
+ }
+
+ if argIdx >= len(args) {
+ return "", fmt.Errorf("insufficient arguments")
+ }
+ arg := args[argIdx]
+ switch arg := arg.(type) {
+ case nil:
+ str = "null"
+ case int64:
+ str = strconv.FormatInt(arg, 10)
+ case float64:
+ str = strconv.FormatFloat(arg, 'f', -1, 64)
+ case bool:
+ str = strconv.FormatBool(arg)
+ case []byte:
+ str = QuoteBytes(arg)
+ case string:
+ str = QuoteString(arg)
+ case time.Time:
+ str = arg.Truncate(time.Microsecond).Format("'2006-01-02 15:04:05.999999999Z07:00:00'")
+ default:
+ return "", fmt.Errorf("invalid arg type: %T", arg)
+ }
+ argUse[argIdx] = true
+
+ // Prevent SQL injection via Line Comment Creation
+ // https://github.com/jackc/pgx/security/advisories/GHSA-m7wr-2xf7-cm9p
+ str = " " + str + " "
+ default:
+ return "", fmt.Errorf("invalid Part type: %T", part)
+ }
+ buf.WriteString(str)
+ }
+
+ for i, used := range argUse {
+ if !used {
+ return "", fmt.Errorf("unused argument: %d", i)
+ }
+ }
+ return buf.String(), nil
+}
+
+func NewQuery(sql string) (*Query, error) {
+ l := &sqlLexer{
+ src: sql,
+ stateFn: rawState,
+ }
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ query := &Query{Parts: l.parts}
+
+ return query, nil
+}
+
+func QuoteString(str string) string {
+ return "'" + strings.ReplaceAll(str, "'", "''") + "'"
+}
+
+func QuoteBytes(buf []byte) string {
+ return `'\x` + hex.EncodeToString(buf) + "'"
+}
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ nested int // multiline comment nesting level.
+ stateFn stateFn
+ parts []Part
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '$':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if '0' <= nextRune && nextRune <= '9' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return placeholderState
+ }
+ case '-':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '-' {
+ l.pos += width
+ return oneLineCommentState
+ }
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ return multilineCommentState
+ }
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+// placeholderState consumes a placeholder value. The $ must already have been consumed. The first rune must be a
+// digit.
+func placeholderState(l *sqlLexer) stateFn {
+ num := 0
+
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if '0' <= r && r <= '9' {
+ num *= 10
+ num += int(r - '0')
+ } else {
+ l.parts = append(l.parts, num)
+ l.pos -= width
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func oneLineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\n', '\r':
+ return rawState
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+func multilineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ l.nested++
+ }
+ case '*':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '/' {
+ continue
+ }
+
+ l.pos += width
+ if l.nested == 0 {
+ return rawState
+ }
+ l.nested--
+
+ case utf8.RuneError:
+ if width != replacementcharacterwidth {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+ }
+}
+
+// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
+// as necessary. This function is only safe when standard_conforming_strings is
+// on.
+func SanitizeSQL(sql string, args ...any) (string, error) {
+ query, err := NewQuery(sql)
+ if err != nil {
+ return "", err
+ }
+ return query.Sanitize(args...)
+}
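
Editor's note: a worked example may make SanitizeSQL's quoting and space padding concrete. Note that arguments must already have one of the exact Go types handled in Sanitize's switch; sanitize is internal, so this only compiles inside the pgx module.

```go
package sanitize_test // hypothetical placement inside the pgx module

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/sanitize"
)

func ExampleSanitizeSQL() {
	// Arguments must use the exact Go types handled by Sanitize:
	// nil, int64, float64, bool, []byte, string, or time.Time.
	sql, err := sanitize.SanitizeSQL(
		"select * from users where id = $1 and name = $2",
		int64(7), "O'Brien",
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(sql)
	// Prints (note the quote doubling and the guard spaces around each value):
	// select * from users where id =  7  and name =  'O''Brien'
}
```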
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
new file mode 100644
index 0000000..dec83f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
@@ -0,0 +1,112 @@
+package stmtcache
+
+import (
+ "container/list"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// LRUCache implements Cache with a Least Recently Used (LRU) cache.
+type LRUCache struct {
+ cap int
+ m map[string]*list.Element
+ l *list.List
+ invalidStmts []*pgconn.StatementDescription
+}
+
+// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
+func NewLRUCache(cap int) *LRUCache {
+ return &LRUCache{
+ cap: cap,
+ m: make(map[string]*list.Element),
+ l: list.New(),
+ }
+}
+
+// Get returns the statement description for sql. Returns nil if not found.
+func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
+ if el, ok := c.m[key]; ok {
+ c.l.MoveToFront(el)
+ return el.Value.(*pgconn.StatementDescription)
+ }
+
+	return nil
+}
+
+// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
+// sd.SQL has been invalidated and RemoveInvalidated has not been called yet.
+func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
+ if sd.SQL == "" {
+ panic("cannot store statement description with empty SQL")
+ }
+
+ if _, present := c.m[sd.SQL]; present {
+ return
+ }
+
+	// The statement may have been invalidated but not yet handled. Do not re-add it to the cache.
+ for _, invalidSD := range c.invalidStmts {
+ if invalidSD.SQL == sd.SQL {
+ return
+ }
+ }
+
+ if c.l.Len() == c.cap {
+ c.invalidateOldest()
+ }
+
+ el := c.l.PushFront(sd)
+ c.m[sd.SQL] = el
+}
+
+// Invalidate invalidates statement description identified by sql. Does nothing if not found.
+func (c *LRUCache) Invalidate(sql string) {
+ if el, ok := c.m[sql]; ok {
+ delete(c.m, sql)
+ c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
+ c.l.Remove(el)
+ }
+}
+
+// InvalidateAll invalidates all statement descriptions.
+func (c *LRUCache) InvalidateAll() {
+ el := c.l.Front()
+ for el != nil {
+ c.invalidStmts = append(c.invalidStmts, el.Value.(*pgconn.StatementDescription))
+ el = el.Next()
+ }
+
+ c.m = make(map[string]*list.Element)
+ c.l = list.New()
+}
+
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
+ return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache may be made between a
+// call to GetInvalidated and a call to RemoveInvalidated, or RemoveInvalidated may remove statement descriptions that
+// were never seen by the call to GetInvalidated.
+func (c *LRUCache) RemoveInvalidated() {
+ c.invalidStmts = nil
+}
+
+// Len returns the number of cached prepared statement descriptions.
+func (c *LRUCache) Len() int {
+ return c.l.Len()
+}
+
+// Cap returns the maximum number of cached prepared statement descriptions.
+func (c *LRUCache) Cap() int {
+ return c.cap
+}
+
+func (c *LRUCache) invalidateOldest() {
+ oldest := c.l.Back()
+ sd := oldest.Value.(*pgconn.StatementDescription)
+ c.invalidStmts = append(c.invalidStmts, sd)
+ delete(c.m, sd.SQL)
+ c.l.Remove(oldest)
+}
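
Editor's note: a minimal sketch of the eviction path above: when the cache is full, Put pushes the least recently used description onto the invalidated list rather than discarding it. stmtcache is internal, so this only compiles inside the pgx module.

```go
package stmtcache_test // hypothetical placement inside the pgx module

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/stmtcache"
	"github.com/jackc/pgx/v5/pgconn"
)

func ExampleLRUCache() {
	cache := stmtcache.NewLRUCache(2)
	cache.Put(&pgconn.StatementDescription{Name: "s1", SQL: "select 1"})
	cache.Put(&pgconn.StatementDescription{Name: "s2", SQL: "select 2"})
	cache.Put(&pgconn.StatementDescription{Name: "s3", SQL: "select 3"}) // evicts the LRU entry

	// The evicted description is queued, not dropped: the caller still has
	// to deallocate the prepared statement on the server.
	for _, sd := range cache.GetInvalidated() {
		fmt.Println(sd.SQL)
	}
	cache.RemoveInvalidated()
	// Output: select 1
}
```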
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
new file mode 100644
index 0000000..d57bdd2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
@@ -0,0 +1,45 @@
+// Package stmtcache is a cache for statement descriptions.
+package stmtcache
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// StatementName returns a statement name that will be stable for sql across multiple connections and program
+// executions.
+func StatementName(sql string) string {
+ digest := sha256.Sum256([]byte(sql))
+ return "stmtcache_" + hex.EncodeToString(digest[0:24])
+}
+
+// Cache caches statement descriptions.
+type Cache interface {
+ // Get returns the statement description for sql. Returns nil if not found.
+ Get(sql string) *pgconn.StatementDescription
+
+ // Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
+ Put(sd *pgconn.StatementDescription)
+
+ // Invalidate invalidates statement description identified by sql. Does nothing if not found.
+ Invalidate(sql string)
+
+ // InvalidateAll invalidates all statement descriptions.
+ InvalidateAll()
+
+ // GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+ GetInvalidated() []*pgconn.StatementDescription
+
+	// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache may be made between a
+	// call to GetInvalidated and a call to RemoveInvalidated, or RemoveInvalidated may remove statement descriptions
+	// that were never seen by the call to GetInvalidated.
+ RemoveInvalidated()
+
+ // Len returns the number of cached prepared statement descriptions.
+ Len() int
+
+ // Cap returns the maximum number of cached prepared statement descriptions.
+ Cap() int
+}
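
Editor's note: a small sketch showing the determinism StatementName promises; the name is the 10-character "stmtcache_" prefix plus 48 hex characters of the SHA-256 digest.

```go
package stmtcache_test // hypothetical placement inside the pgx module

import (
	"fmt"

	"github.com/jackc/pgx/v5/internal/stmtcache"
)

func ExampleStatementName() {
	// The name is a pure function of the SQL text, so every connection and
	// every run of the program derives the same name for the same query.
	a := stmtcache.StatementName("select 1")
	b := stmtcache.StatementName("select 1")
	fmt.Println(a == b, len(a))
	// Output: true 58
}
```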
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
new file mode 100644
index 0000000..6964132
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
@@ -0,0 +1,77 @@
+package stmtcache
+
+import (
+ "math"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// UnlimitedCache implements Cache with no capacity limit.
+type UnlimitedCache struct {
+ m map[string]*pgconn.StatementDescription
+ invalidStmts []*pgconn.StatementDescription
+}
+
+// NewUnlimitedCache creates a new UnlimitedCache.
+func NewUnlimitedCache() *UnlimitedCache {
+ return &UnlimitedCache{
+ m: make(map[string]*pgconn.StatementDescription),
+ }
+}
+
+// Get returns the statement description for sql. Returns nil if not found.
+func (c *UnlimitedCache) Get(sql string) *pgconn.StatementDescription {
+ return c.m[sql]
+}
+
+// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
+func (c *UnlimitedCache) Put(sd *pgconn.StatementDescription) {
+ if sd.SQL == "" {
+ panic("cannot store statement description with empty SQL")
+ }
+
+ if _, present := c.m[sd.SQL]; present {
+ return
+ }
+
+ c.m[sd.SQL] = sd
+}
+
+// Invalidate invalidates statement description identified by sql. Does nothing if not found.
+func (c *UnlimitedCache) Invalidate(sql string) {
+ if sd, ok := c.m[sql]; ok {
+ delete(c.m, sql)
+ c.invalidStmts = append(c.invalidStmts, sd)
+ }
+}
+
+// InvalidateAll invalidates all statement descriptions.
+func (c *UnlimitedCache) InvalidateAll() {
+ for _, sd := range c.m {
+ c.invalidStmts = append(c.invalidStmts, sd)
+ }
+
+ c.m = make(map[string]*pgconn.StatementDescription)
+}
+
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
+ return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache may be made between a
+// call to GetInvalidated and a call to RemoveInvalidated, or RemoveInvalidated may remove statement descriptions that
+// were never seen by the call to GetInvalidated.
+func (c *UnlimitedCache) RemoveInvalidated() {
+ c.invalidStmts = nil
+}
+
+// Len returns the number of cached prepared statement descriptions.
+func (c *UnlimitedCache) Len() int {
+ return len(c.m)
+}
+
+// Cap returns the maximum number of cached prepared statement descriptions.
+func (c *UnlimitedCache) Cap() int {
+ return math.MaxInt
+}
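
Editor's note: the GetInvalidated/RemoveInvalidated contract repeated above suggests a calling sequence like the hypothetical helper below. flushInvalidated is not part of pgx; pgconn.PgConn.Deallocate is assumed to be available as in current pgx v5.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5/internal/stmtcache"
	"github.com/jackc/pgx/v5/pgconn"
)

// flushInvalidated is a hypothetical helper showing the intended sequence:
// deallocate on the server first, then forget the descriptions locally.
// No other Cache calls may happen in between.
func flushInvalidated(ctx context.Context, pgConn *pgconn.PgConn, c stmtcache.Cache) error {
	for _, sd := range c.GetInvalidated() {
		if err := pgConn.Deallocate(ctx, sd.Name); err != nil {
			return err
		}
	}
	c.RemoveInvalidated()
	return nil
}
```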
diff --git a/vendor/github.com/jackc/pgx/v5/large_objects.go b/vendor/github.com/jackc/pgx/v5/large_objects.go
new file mode 100644
index 0000000..9d21afd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/large_objects.go
@@ -0,0 +1,161 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
+// PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data
+// in the message, maxLargeObjectMessageLength should be no larger than 1 GB - 1 KB.
+var maxLargeObjectMessageLength = 1024*1024*1024 - 1024
+
+// LargeObjects is a structure used to access the large objects API. It is only valid within the transaction where it
+// was created.
+//
+// For more details see: http://www.postgresql.org/docs/current/static/largeobjects.html
+type LargeObjects struct {
+ tx Tx
+}
+
+type LargeObjectMode int32
+
+const (
+ LargeObjectModeWrite LargeObjectMode = 0x20000
+ LargeObjectModeRead LargeObjectMode = 0x40000
+)
+
+// Create creates a new large object. If oid is zero, the server assigns an unused OID.
+func (o *LargeObjects) Create(ctx context.Context, oid uint32) (uint32, error) {
+ err := o.tx.QueryRow(ctx, "select lo_create($1)", oid).Scan(&oid)
+ return oid, err
+}
+
+// Open opens an existing large object with the given mode. ctx will also be used for all operations on the opened large
+// object.
+func (o *LargeObjects) Open(ctx context.Context, oid uint32, mode LargeObjectMode) (*LargeObject, error) {
+ var fd int32
+ err := o.tx.QueryRow(ctx, "select lo_open($1, $2)", oid, mode).Scan(&fd)
+ if err != nil {
+ return nil, err
+ }
+ return &LargeObject{fd: fd, tx: o.tx, ctx: ctx}, nil
+}
+
+// Unlink removes a large object from the database.
+func (o *LargeObjects) Unlink(ctx context.Context, oid uint32) error {
+ var result int32
+ err := o.tx.QueryRow(ctx, "select lo_unlink($1)", oid).Scan(&result)
+ if err != nil {
+ return err
+ }
+
+ if result != 1 {
+ return errors.New("failed to remove large object")
+ }
+
+ return nil
+}
+
+// A LargeObject is a large object stored on the server. It is only valid within the transaction that it was initialized
+// in. It uses the context it was initialized with for all operations. It implements these interfaces:
+//
+// io.Writer
+// io.Reader
+// io.Seeker
+// io.Closer
+type LargeObject struct {
+ ctx context.Context
+ tx Tx
+ fd int32
+}
+
+// Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
+func (o *LargeObject) Write(p []byte) (int, error) {
+ nTotal := 0
+ for {
+ expected := len(p) - nTotal
+ if expected == 0 {
+ break
+ } else if expected > maxLargeObjectMessageLength {
+ expected = maxLargeObjectMessageLength
+ }
+
+ var n int
+ err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p[nTotal:nTotal+expected]).Scan(&n)
+ if err != nil {
+ return nTotal, err
+ }
+
+ if n < 0 {
+ return nTotal, errors.New("failed to write to large object")
+ }
+
+ nTotal += n
+
+ if n < expected {
+ return nTotal, errors.New("short write to large object")
+ } else if n > expected {
+ return nTotal, errors.New("invalid write to large object")
+ }
+ }
+
+ return nTotal, nil
+}
+
+// Read reads up to len(p) bytes into p, returning the number of bytes read.
+func (o *LargeObject) Read(p []byte) (int, error) {
+ nTotal := 0
+ for {
+ expected := len(p) - nTotal
+ if expected == 0 {
+ break
+ } else if expected > maxLargeObjectMessageLength {
+ expected = maxLargeObjectMessageLength
+ }
+
+ res := pgtype.PreallocBytes(p[nTotal:])
+ err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
+ // We compute expected so that it always fits into p, so it should never happen
+ // that PreallocBytes's ScanBytes had to allocate a new slice.
+ nTotal += len(res)
+ if err != nil {
+ return nTotal, err
+ }
+
+ if len(res) < expected {
+ return nTotal, io.EOF
+ } else if len(res) > expected {
+ return nTotal, errors.New("invalid read of large object")
+ }
+ }
+
+ return nTotal, nil
+}
+
+// Seek moves the current location pointer to the new location specified by offset.
+func (o *LargeObject) Seek(offset int64, whence int) (n int64, err error) {
+ err = o.tx.QueryRow(o.ctx, "select lo_lseek64($1, $2, $3)", o.fd, offset, whence).Scan(&n)
+ return n, err
+}
+
+// Tell returns the current read or write location of the large object descriptor.
+func (o *LargeObject) Tell() (n int64, err error) {
+ err = o.tx.QueryRow(o.ctx, "select lo_tell64($1)", o.fd).Scan(&n)
+ return n, err
+}
+
+// Truncate the large object to size.
+func (o *LargeObject) Truncate(size int64) (err error) {
+ _, err = o.tx.Exec(o.ctx, "select lo_truncate64($1, $2)", o.fd, size)
+ return err
+}
+
+// Close the large object descriptor.
+func (o *LargeObject) Close() error {
+ _, err := o.tx.Exec(o.ctx, "select lo_close($1)", o.fd)
+ return err
+}
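
Editor's note: a minimal end-to-end sketch of the large objects API. It assumes pgx v5's Tx.LargeObjects() accessor and relies on the rule stated above that a LargeObject is only valid inside its originating transaction.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// writeLargeObject creates a large object, fills it, and returns its OID.
func writeLargeObject(ctx context.Context, conn *pgx.Conn, data []byte) (uint32, error) {
	// Large objects are only usable inside the transaction that opened them.
	tx, err := conn.Begin(ctx)
	if err != nil {
		return 0, err
	}
	defer tx.Rollback(ctx)

	lo := tx.LargeObjects()
	oid, err := lo.Create(ctx, 0) // 0 lets the server pick an unused OID
	if err != nil {
		return 0, err
	}
	obj, err := lo.Open(ctx, oid, pgx.LargeObjectModeWrite)
	if err != nil {
		return 0, err
	}
	if _, err := obj.Write(data); err != nil {
		return 0, err
	}
	if err := obj.Close(); err != nil {
		return 0, err
	}
	return oid, tx.Commit(ctx)
}
```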
diff --git a/vendor/github.com/jackc/pgx/v5/named_args.go b/vendor/github.com/jackc/pgx/v5/named_args.go
new file mode 100644
index 0000000..c88991e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/named_args.go
@@ -0,0 +1,295 @@
+package pgx
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// NamedArgs can be used as the first argument to a query method. It will replace every '@' named placeholder with a '$'
+// ordinal placeholder and construct the appropriate arguments.
+//
+// For example, the following two queries are equivalent:
+//
+// conn.Query(ctx, "select * from widgets where foo = @foo and bar = @bar", pgx.NamedArgs{"foo": 1, "bar": 2})
+// conn.Query(ctx, "select * from widgets where foo = $1 and bar = $2", 1, 2)
+//
+// Named placeholders are case sensitive and must start with a letter or underscore. Subsequent characters can be
+// letters, numbers, or underscores.
+type NamedArgs map[string]any
+
+// RewriteQuery implements the QueryRewriter interface.
+func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
+ return rewriteQuery(na, sql, false)
+}
+
+// StrictNamedArgs can be used in the same way as NamedArgs, but the provided arguments are also checked to ensure that
+// they cover every named argument the sql query uses and include no extra arguments.
+type StrictNamedArgs map[string]any
+
+// RewriteQuery implements the QueryRewriter interface.
+func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
+ return rewriteQuery(sna, sql, true)
+}
+
+type namedArg string
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ nested int // multiline comment nesting level.
+ stateFn stateFn
+ parts []any
+
+ nameToOrdinal map[namedArg]int
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) {
+ l := &sqlLexer{
+ src: sql,
+ stateFn: rawState,
+ nameToOrdinal: make(map[namedArg]int, len(na)),
+ }
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ sb := strings.Builder{}
+ for _, p := range l.parts {
+ switch p := p.(type) {
+ case string:
+ sb.WriteString(p)
+ case namedArg:
+ sb.WriteRune('$')
+ sb.WriteString(strconv.Itoa(l.nameToOrdinal[p]))
+ }
+ }
+
+ newArgs = make([]any, len(l.nameToOrdinal))
+ for name, ordinal := range l.nameToOrdinal {
+ var found bool
+ newArgs[ordinal-1], found = na[string(name)]
+ if isStrict && !found {
+ return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
+ }
+ }
+
+ if isStrict {
+ for name := range na {
+ if _, found := l.nameToOrdinal[namedArg(name)]; !found {
+ return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name)
+ }
+ }
+ }
+
+ return sb.String(), newArgs, nil
+}
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '@':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if isLetter(nextRune) || nextRune == '_' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return namedArgState
+ }
+ case '-':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '-' {
+ l.pos += width
+ return oneLineCommentState
+ }
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ return multilineCommentState
+ }
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func isLetter(r rune) bool {
+ return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
+}
+
+func namedArgState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if r == utf8.RuneError {
+ if l.pos-l.start > 0 {
+ na := namedArg(l.src[l.start:l.pos])
+ if _, found := l.nameToOrdinal[na]; !found {
+ l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
+ }
+ l.parts = append(l.parts, na)
+ l.start = l.pos
+ }
+ return nil
+ } else if !(isLetter(r) || (r >= '0' && r <= '9') || r == '_') {
+ l.pos -= width
+ na := namedArg(l.src[l.start:l.pos])
+ if _, found := l.nameToOrdinal[na]; !found {
+ l.nameToOrdinal[na] = len(l.nameToOrdinal) + 1
+ }
+			l.parts = append(l.parts, na)
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func oneLineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\n', '\r':
+ return rawState
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func multilineCommentState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '/':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '*' {
+ l.pos += width
+ l.nested++
+ }
+ case '*':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '/' {
+ continue
+ }
+
+ l.pos += width
+ if l.nested == 0 {
+ return rawState
+ }
+ l.nested--
+
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
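
Editor's note: NamedArgs already has an example in its doc comment; the sketch below shows the extra checking StrictNamedArgs adds. The "baz" entry is deliberate: it never appears in the query, so the rewrite fails before anything is sent to the server.

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func queryWidgets(ctx context.Context, conn *pgx.Conn) (pgx.Rows, error) {
	// StrictNamedArgs behaves like NamedArgs but errors out when the map and
	// the query disagree: here "baz" is unused, so Query returns an error.
	return conn.Query(ctx,
		"select * from widgets where foo = @foo and bar = @bar",
		pgx.StrictNamedArgs{"foo": 1, "bar": 2, "baz": 3},
	)
}
```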
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/README.md b/vendor/github.com/jackc/pgx/v5/pgconn/README.md
new file mode 100644
index 0000000..1fe15c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/README.md
@@ -0,0 +1,29 @@
+# pgconn
+
+Package pgconn is a low-level PostgreSQL database driver. It operates at nearly the same level as the C library libpq.
+It is primarily intended to serve as the foundation for higher level libraries such as https://github.com/jackc/pgx.
+Applications should handle normal queries with a higher level library and only use pgconn directly when required for
+low-level access to PostgreSQL functionality.
+
+## Example Usage
+
+```go
+pgConn, err := pgconn.Connect(context.Background(), os.Getenv("DATABASE_URL"))
+if err != nil {
+ log.Fatalln("pgconn failed to connect:", err)
+}
+defer pgConn.Close(context.Background())
+
+result := pgConn.ExecParams(context.Background(), "SELECT email FROM users WHERE id=$1", [][]byte{[]byte("123")}, nil, nil, nil)
+for result.NextRow() {
+ fmt.Println("User 123 has email:", string(result.Values()[0]))
+}
+_, err = result.Close()
+if err != nil {
+ log.Fatalln("failed reading result:", err)
+}
+```
+
+## Testing
+
+See CONTRIBUTING.md for setup instructions.
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go b/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go
new file mode 100644
index 0000000..0649836
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go
@@ -0,0 +1,272 @@
+// SCRAM-SHA-256 authentication
+//
+// Resources:
+// https://tools.ietf.org/html/rfc5802
+// https://tools.ietf.org/html/rfc8265
+// https://www.postgresql.org/docs/current/sasl-authentication.html
+//
+// Inspiration drawn from other implementations:
+// https://github.com/lib/pq/pull/608
+// https://github.com/lib/pq/pull/788
+// https://github.com/lib/pq/pull/833
+
+package pgconn
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/pgproto3"
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/text/secure/precis"
+)
+
+const clientNonceLen = 18
+
+// Perform SCRAM authentication.
+func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
+ sc, err := newScramClient(serverAuthMechanisms, c.config.Password)
+ if err != nil {
+ return err
+ }
+
+ // Send client-first-message in a SASLInitialResponse
+ saslInitialResponse := &pgproto3.SASLInitialResponse{
+ AuthMechanism: "SCRAM-SHA-256",
+ Data: sc.clientFirstMessage(),
+ }
+ c.frontend.Send(saslInitialResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+
+ // Receive server-first-message payload in an AuthenticationSASLContinue.
+ saslContinue, err := c.rxSASLContinue()
+ if err != nil {
+ return err
+ }
+ err = sc.recvServerFirstMessage(saslContinue.Data)
+ if err != nil {
+ return err
+ }
+
+ // Send client-final-message in a SASLResponse
+ saslResponse := &pgproto3.SASLResponse{
+ Data: []byte(sc.clientFinalMessage()),
+ }
+ c.frontend.Send(saslResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+
+ // Receive server-final-message payload in an AuthenticationSASLFinal.
+ saslFinal, err := c.rxSASLFinal()
+ if err != nil {
+ return err
+ }
+ return sc.recvServerFinalMessage(saslFinal.Data)
+}
+
+func (c *PgConn) rxSASLContinue() (*pgproto3.AuthenticationSASLContinue, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationSASLContinue:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationSASLContinue message but received unexpected message %T", msg)
+}
+
+func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationSASLFinal:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationSASLFinal message but received unexpected message %T", msg)
+}
+
+type scramClient struct {
+ serverAuthMechanisms []string
+ password []byte
+ clientNonce []byte
+
+ clientFirstMessageBare []byte
+
+ serverFirstMessage []byte
+ clientAndServerNonce []byte
+ salt []byte
+ iterations int
+
+ saltedPassword []byte
+ authMessage []byte
+}
+
+func newScramClient(serverAuthMechanisms []string, password string) (*scramClient, error) {
+ sc := &scramClient{
+ serverAuthMechanisms: serverAuthMechanisms,
+ }
+
+ // Ensure server supports SCRAM-SHA-256
+ hasScramSHA256 := false
+ for _, mech := range sc.serverAuthMechanisms {
+ if mech == "SCRAM-SHA-256" {
+ hasScramSHA256 = true
+ break
+ }
+ }
+ if !hasScramSHA256 {
+ return nil, errors.New("server does not support SCRAM-SHA-256")
+ }
+
+ // precis.OpaqueString is equivalent to SASLprep for password.
+ var err error
+ sc.password, err = precis.OpaqueString.Bytes([]byte(password))
+ if err != nil {
+ // PostgreSQL allows passwords invalid according to SCRAM / SASLprep.
+ sc.password = []byte(password)
+ }
+
+ buf := make([]byte, clientNonceLen)
+ _, err = rand.Read(buf)
+ if err != nil {
+ return nil, err
+ }
+ sc.clientNonce = make([]byte, base64.RawStdEncoding.EncodedLen(len(buf)))
+ base64.RawStdEncoding.Encode(sc.clientNonce, buf)
+
+ return sc, nil
+}
+
+func (sc *scramClient) clientFirstMessage() []byte {
+ sc.clientFirstMessageBare = []byte(fmt.Sprintf("n=,r=%s", sc.clientNonce))
+ return []byte(fmt.Sprintf("n,,%s", sc.clientFirstMessageBare))
+}
+
+func (sc *scramClient) recvServerFirstMessage(serverFirstMessage []byte) error {
+ sc.serverFirstMessage = serverFirstMessage
+ buf := serverFirstMessage
+ if !bytes.HasPrefix(buf, []byte("r=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include r=")
+ }
+ buf = buf[2:]
+
+ idx := bytes.IndexByte(buf, ',')
+ if idx == -1 {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
+ }
+ sc.clientAndServerNonce = buf[:idx]
+ buf = buf[idx+1:]
+
+ if !bytes.HasPrefix(buf, []byte("s=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include s=")
+ }
+ buf = buf[2:]
+
+ idx = bytes.IndexByte(buf, ',')
+ if idx == -1 {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
+ }
+ saltStr := buf[:idx]
+ buf = buf[idx+1:]
+
+ if !bytes.HasPrefix(buf, []byte("i=")) {
+ return errors.New("invalid SCRAM server-first-message received from server: did not include i=")
+ }
+ buf = buf[2:]
+ iterationsStr := buf
+
+ var err error
+ sc.salt, err = base64.StdEncoding.DecodeString(string(saltStr))
+ if err != nil {
+ return fmt.Errorf("invalid SCRAM salt received from server: %w", err)
+ }
+
+ sc.iterations, err = strconv.Atoi(string(iterationsStr))
+ if err != nil || sc.iterations <= 0 {
+ return fmt.Errorf("invalid SCRAM iteration count received from server: %w", err)
+ }
+
+ if !bytes.HasPrefix(sc.clientAndServerNonce, sc.clientNonce) {
+ return errors.New("invalid SCRAM nonce: did not start with client nonce")
+ }
+
+ if len(sc.clientAndServerNonce) <= len(sc.clientNonce) {
+ return errors.New("invalid SCRAM nonce: did not include server nonce")
+ }
+
+ return nil
+}
+
+func (sc *scramClient) clientFinalMessage() string {
+ clientFinalMessageWithoutProof := []byte(fmt.Sprintf("c=biws,r=%s", sc.clientAndServerNonce))
+
+ sc.saltedPassword = pbkdf2.Key([]byte(sc.password), sc.salt, sc.iterations, 32, sha256.New)
+ sc.authMessage = bytes.Join([][]byte{sc.clientFirstMessageBare, sc.serverFirstMessage, clientFinalMessageWithoutProof}, []byte(","))
+
+ clientProof := computeClientProof(sc.saltedPassword, sc.authMessage)
+
+ return fmt.Sprintf("%s,p=%s", clientFinalMessageWithoutProof, clientProof)
+}
+
+func (sc *scramClient) recvServerFinalMessage(serverFinalMessage []byte) error {
+ if !bytes.HasPrefix(serverFinalMessage, []byte("v=")) {
+ return errors.New("invalid SCRAM server-final-message received from server")
+ }
+
+ serverSignature := serverFinalMessage[2:]
+
+ if !hmac.Equal(serverSignature, computeServerSignature(sc.saltedPassword, sc.authMessage)) {
+ return errors.New("invalid SCRAM ServerSignature received from server")
+ }
+
+ return nil
+}
+
+func computeHMAC(key, msg []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ mac.Write(msg)
+ return mac.Sum(nil)
+}
+
+func computeClientProof(saltedPassword, authMessage []byte) []byte {
+ clientKey := computeHMAC(saltedPassword, []byte("Client Key"))
+ storedKey := sha256.Sum256(clientKey)
+ clientSignature := computeHMAC(storedKey[:], authMessage)
+
+ clientProof := make([]byte, len(clientSignature))
+ for i := 0; i < len(clientSignature); i++ {
+ clientProof[i] = clientKey[i] ^ clientSignature[i]
+ }
+
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(clientProof)))
+ base64.StdEncoding.Encode(buf, clientProof)
+ return buf
+}
+
+func computeServerSignature(saltedPassword []byte, authMessage []byte) []byte {
+ serverKey := computeHMAC(saltedPassword, []byte("Server Key"))
+ serverSignature := computeHMAC(serverKey, authMessage)
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(serverSignature)))
+ base64.StdEncoding.Encode(buf, serverSignature)
+ return buf
+}
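
Editor's note: a self-contained sketch of the RFC 5802 arithmetic that computeClientProof implements, with a made-up password, salt, iteration count, and AuthMessage standing in for the real SCRAM exchange.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func hmacSHA256(key, msg []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(msg)
	return mac.Sum(nil)
}

func main() {
	// SaltedPassword := PBKDF2(password, salt, i); all inputs here are made up.
	salted := pbkdf2.Key([]byte("secret"), []byte("salt"), 4096, 32, sha256.New)

	clientKey := hmacSHA256(salted, []byte("Client Key"))
	storedKey := sha256.Sum256(clientKey)

	// The proof sent to the server is ClientKey XOR HMAC(StoredKey, AuthMessage);
	// the server can verify it without ever learning ClientKey itself.
	authMessage := []byte("client-first-bare,server-first,client-final-without-proof")
	clientSig := hmacSHA256(storedKey[:], authMessage)

	proof := make([]byte, len(clientSig))
	for i := range proof {
		proof[i] = clientKey[i] ^ clientSig[i]
	}
	fmt.Printf("%x\n", proof)
}
```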
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/config.go b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
new file mode 100644
index 0000000..598917f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
@@ -0,0 +1,918 @@
+package pgconn
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgpassfile"
+ "github.com/jackc/pgservicefile"
+ "github.com/jackc/pgx/v5/pgconn/ctxwatch"
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+type AfterConnectFunc func(ctx context.Context, pgconn *PgConn) error
+type ValidateConnectFunc func(ctx context.Context, pgconn *PgConn) error
+type GetSSLPasswordFunc func(ctx context.Context) string
+
+// Config holds the settings used to establish a connection to a PostgreSQL server. It must be created by [ParseConfig].
+// A manually initialized Config will cause ConnectConfig to panic.
+type Config struct {
+ Host string // host (e.g. localhost) or absolute path to unix domain socket directory (e.g. /private/tmp)
+ Port uint16
+ Database string
+ User string
+ Password string
+ TLSConfig *tls.Config // nil disables TLS
+ ConnectTimeout time.Duration
+ DialFunc DialFunc // e.g. net.Dialer.DialContext
+ LookupFunc LookupFunc // e.g. net.Resolver.LookupHost
+ BuildFrontend BuildFrontendFunc
+
+ // BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called
+ // when a context passed to a PgConn method is canceled.
+ BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler
+
+ RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+
+ KerberosSrvName string
+ KerberosSpn string
+ Fallbacks []*FallbackConfig
+
+ // ValidateConnect is called during a connection attempt after a successful authentication with the PostgreSQL server.
+ // It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next
+ // fallback config is tried. This allows implementing high availability behavior such as libpq does with target_session_attrs.
+ ValidateConnect ValidateConnectFunc
+
+ // AfterConnect is called after ValidateConnect. It can be used to set up the connection (e.g. Set session variables
+ // or prepare statements). If this returns an error the connection attempt fails.
+ AfterConnect AfterConnectFunc
+
+ // OnNotice is a callback function called when a notice response is received.
+ OnNotice NoticeHandler
+
+ // OnNotification is a callback function called when a notification from the LISTEN/NOTIFY system is received.
+ OnNotification NotificationHandler
+
+	// OnPgError is a callback function called when a Postgres error is received from the server. The default handler will
+	// close the connection on any FATAL error. If you override this handler you should call the previously set handler or
+	// ensure that you close on FATAL errors by returning false.
+ OnPgError PgErrorHandler
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
+type ParseConfigOptions struct {
+ // GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function
+ // PQsetSSLKeyPassHook_OpenSSL.
+ GetSSLPassword GetSSLPasswordFunc
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the TLSConfig field:
+// according to the tls.Config docs it must not be modified after creation.
+func (c *Config) Copy() *Config {
+ newConf := new(Config)
+ *newConf = *c
+ if newConf.TLSConfig != nil {
+ newConf.TLSConfig = c.TLSConfig.Clone()
+ }
+ if newConf.RuntimeParams != nil {
+ newConf.RuntimeParams = make(map[string]string, len(c.RuntimeParams))
+ for k, v := range c.RuntimeParams {
+ newConf.RuntimeParams[k] = v
+ }
+ }
+ if newConf.Fallbacks != nil {
+ newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
+ for i, fallback := range c.Fallbacks {
+ newFallback := new(FallbackConfig)
+ *newFallback = *fallback
+ if newFallback.TLSConfig != nil {
+ newFallback.TLSConfig = fallback.TLSConfig.Clone()
+ }
+ newConf.Fallbacks[i] = newFallback
+ }
+ }
+ return newConf
+}
+
+// FallbackConfig holds additional settings to attempt a connection with when the primary Config fails to establish a
+// network connection. It is used for TLS fallback such as sslmode=prefer and high availability (HA) connections.
+type FallbackConfig struct {
+ Host string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp)
+ Port uint16
+ TLSConfig *tls.Config // nil disables TLS
+}
+
+// connectOneConfig is the configuration for a single attempt to connect to a single host.
+type connectOneConfig struct {
+ network string
+ address string
+ originalHostname string // original hostname before resolving
+ tlsConfig *tls.Config // nil disables TLS
+}
+
+// isAbsolutePath checks if the provided value is an absolute path, either
+// beginning with a forward slash (as on Linux-based systems) or with a capital
+// letter A-Z followed by a colon and a backslash, e.g. "C:\" (as on Windows).
+func isAbsolutePath(path string) bool {
+ isWindowsPath := func(p string) bool {
+ if len(p) < 3 {
+ return false
+ }
+ drive := p[0]
+ colon := p[1]
+ backslash := p[2]
+ if drive >= 'A' && drive <= 'Z' && colon == ':' && backslash == '\\' {
+ return true
+ }
+ return false
+ }
+ return strings.HasPrefix(path, "/") || isWindowsPath(path)
+}
+
+// NetworkAddress converts a PostgreSQL host and port into network and address suitable for use with
+// net.Dial.
+func NetworkAddress(host string, port uint16) (network, address string) {
+ if isAbsolutePath(host) {
+ network = "unix"
+ address = filepath.Join(host, ".s.PGSQL.") + strconv.FormatInt(int64(port), 10)
+ } else {
+ network = "tcp"
+ address = net.JoinHostPort(host, strconv.Itoa(int(port)))
+ }
+ return network, address
+}
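
Editor's note: a worked example of NetworkAddress's two branches.

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	// An absolute path selects a unix domain socket in that directory...
	fmt.Println(pgconn.NetworkAddress("/var/run/postgresql", 5432))
	// ...anything else is treated as a TCP host.
	fmt.Println(pgconn.NetworkAddress("db.example.com", 5432))
	// Output:
	// unix /var/run/postgresql/.s.PGSQL.5432
	// tcp db.example.com:5432
}
```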
+
+// ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It
+// uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely
+// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See
+// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty
+// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
+//
+// # Example Keyword/Value
+// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca
+//
+// # Example URL
+// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca
+//
+// The returned *Config may be modified. However, it is strongly recommended that any configuration that can be done
+// through the connection string be done there. In particular the fields Host, Port, TLSConfig, and Fallbacks can be
+// interdependent (e.g. TLSConfig needs knowledge of the host to validate the server certificate). These fields should
+// not be modified individually. They should all be modified or all left unchanged.
+//
+// ParseConfig supports specifying multiple hosts in a similar manner to libpq. Host and port may include comma separated
+// values that will be tried in order. This can be used as part of a high availability system. See
+// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS for more information.
+//
+// # Example URL
+// postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb
+//
+// ParseConfig currently recognizes the following environment variables and their parameter key word equivalents passed
+// via database URL or keyword/value:
+//
+// PGHOST
+// PGPORT
+// PGDATABASE
+// PGUSER
+// PGPASSWORD
+// PGPASSFILE
+// PGSERVICE
+// PGSERVICEFILE
+// PGSSLMODE
+// PGSSLCERT
+// PGSSLKEY
+// PGSSLROOTCERT
+// PGSSLPASSWORD
+// PGAPPNAME
+// PGCONNECT_TIMEOUT
+// PGTARGETSESSIONATTRS
+//
+// See http://www.postgresql.org/docs/11/static/libpq-envars.html for details on the meaning of environment variables.
+//
+// See https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-PARAMKEYWORDS for parameter key word names. They are
+// usually but not always the environment variable name downcased and without the "PG" prefix.
+//
+// Important Security Notes:
+//
+// ParseConfig tries to match libpq behavior with regard to PGSSLMODE. This includes defaulting to "prefer" behavior if
+// not set.
+//
+// See http://www.postgresql.org/docs/11/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION for details on what level of
+// security each sslmode provides.
+//
+// The sslmode "prefer" (the default), sslmode "allow", and multiple hosts are implemented via the Fallbacks field of
+// the Config struct. If TLSConfig is manually changed it will not affect the fallbacks. For example, in the case of
+// sslmode "prefer" this means it will first try the main Config settings which use TLS, then it will try the fallback
+// which does not use TLS. This can lead to an unexpected unencrypted connection if the main TLS config is manually
+// changed later but the unencrypted fallback is present. Ensure there are no stale fallbacks when manually setting
+// TLSConfig.
+//
+// Other known differences with libpq:
+//
+// When multiple hosts are specified, libpq allows them to have different passwords set via the .pgpass file. pgconn
+// does not.
+//
+// In addition, ParseConfig accepts the following options:
+//
+// - servicefile.
+// libpq only reads servicefile from the PGSERVICEFILE environment variable. ParseConfig accepts servicefile as a
+// part of the connection string.
+func ParseConfig(connString string) (*Config, error) {
+ var parseConfigOptions ParseConfigOptions
+ return ParseConfigWithOptions(connString, parseConfigOptions)
+}
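+
+// A usage sketch for ParseConfig (hypothetical credentials; error handling
+// elided):
+//
+//	config, err := ParseConfig("postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca")
+//	if err != nil {
+//		// handle error
+//	}
+//	config.RuntimeParams["application_name"] = "myapp" // independent field, safe to set
+//	conn, err := ConnectConfig(context.Background(), config)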
+
+// ParseConfigWithOptions builds a *Config from connString and options with similar behavior to the PostgreSQL standard
+// C library libpq. options contains settings that cannot be specified in a connString such as providing a function to
+// get the SSL password.
+func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Config, error) {
+ defaultSettings := defaultSettings()
+ envSettings := parseEnvSettings()
+
+ connStringSettings := make(map[string]string)
+ if connString != "" {
+ var err error
+ // connString may be a database URL or in PostgreSQL keyword/value format
+ if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
+ connStringSettings, err = parseURLSettings(connString)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err}
+ }
+ } else {
+ connStringSettings, err = parseKeywordValueSettings(connString)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err}
+ }
+ }
+ }
+
+ settings := mergeSettings(defaultSettings, envSettings, connStringSettings)
+ if service, present := settings["service"]; present {
+ serviceSettings, err := parseServiceSettings(settings["servicefile"], service)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to read service", err: err}
+ }
+
+ settings = mergeSettings(defaultSettings, envSettings, serviceSettings, connStringSettings)
+ }
+
+ config := &Config{
+ createdByParseConfig: true,
+ Database: settings["database"],
+ User: settings["user"],
+ Password: settings["password"],
+ RuntimeParams: make(map[string]string),
+ BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
+ return pgproto3.NewFrontend(r, w)
+ },
+ BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
+ return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
+ },
+ OnPgError: func(_ *PgConn, pgErr *PgError) bool {
+ // Close the connection on any FATAL-severity errors.
+ if strings.EqualFold(pgErr.Severity, "FATAL") {
+ return false
+ }
+ return true
+ },
+ }
+
+ if connectTimeoutSetting, present := settings["connect_timeout"]; present {
+ connectTimeout, err := parseConnectTimeoutSetting(connectTimeoutSetting)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "invalid connect_timeout", err: err}
+ }
+ config.ConnectTimeout = connectTimeout
+ config.DialFunc = makeConnectTimeoutDialFunc(connectTimeout)
+ } else {
+ defaultDialer := makeDefaultDialer()
+ config.DialFunc = defaultDialer.DialContext
+ }
+
+ config.LookupFunc = makeDefaultResolver().LookupHost
+
+ notRuntimeParams := map[string]struct{}{
+ "host": {},
+ "port": {},
+ "database": {},
+ "user": {},
+ "password": {},
+ "passfile": {},
+ "connect_timeout": {},
+ "sslmode": {},
+ "sslkey": {},
+ "sslcert": {},
+ "sslrootcert": {},
+ "sslpassword": {},
+ "sslsni": {},
+ "krbspn": {},
+ "krbsrvname": {},
+ "target_session_attrs": {},
+ "service": {},
+ "servicefile": {},
+ }
+
+ // Adding kerberos configuration
+ if _, present := settings["krbsrvname"]; present {
+ config.KerberosSrvName = settings["krbsrvname"]
+ }
+ if _, present := settings["krbspn"]; present {
+ config.KerberosSpn = settings["krbspn"]
+ }
+
+ for k, v := range settings {
+ if _, present := notRuntimeParams[k]; present {
+ continue
+ }
+ config.RuntimeParams[k] = v
+ }
+
+ fallbacks := []*FallbackConfig{}
+
+ hosts := strings.Split(settings["host"], ",")
+ ports := strings.Split(settings["port"], ",")
+
+ for i, host := range hosts {
+ var portStr string
+ if i < len(ports) {
+ portStr = ports[i]
+ } else {
+ portStr = ports[0]
+ }
+
+ port, err := parsePort(portStr)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "invalid port", err: err}
+ }
+
+ var tlsConfigs []*tls.Config
+
+ // Ignore TLS settings when using a Unix domain socket, like libpq does
+ if network, _ := NetworkAddress(host, port); network == "unix" {
+ tlsConfigs = append(tlsConfigs, nil)
+ } else {
+ var err error
+ tlsConfigs, err = configTLS(settings, host, options)
+ if err != nil {
+ return nil, &ParseConfigError{ConnString: connString, msg: "failed to configure TLS", err: err}
+ }
+ }
+
+ for _, tlsConfig := range tlsConfigs {
+ fallbacks = append(fallbacks, &FallbackConfig{
+ Host: host,
+ Port: port,
+ TLSConfig: tlsConfig,
+ })
+ }
+ }
+
+ config.Host = fallbacks[0].Host
+ config.Port = fallbacks[0].Port
+ config.TLSConfig = fallbacks[0].TLSConfig
+ config.Fallbacks = fallbacks[1:]
+
+ passfile, err := pgpassfile.ReadPassfile(settings["passfile"])
+ if err == nil {
+ if config.Password == "" {
+ host := config.Host
+ if network, _ := NetworkAddress(config.Host, config.Port); network == "unix" {
+ host = "localhost"
+ }
+
+ config.Password = passfile.FindPassword(host, strconv.Itoa(int(config.Port)), config.Database, config.User)
+ }
+ }
+
+ switch tsa := settings["target_session_attrs"]; tsa {
+ case "read-write":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
+ case "read-only":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsReadOnly
+ case "primary":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsPrimary
+ case "standby":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsStandby
+ case "prefer-standby":
+ config.ValidateConnect = ValidateConnectTargetSessionAttrsPreferStandby
+ case "any":
+ // do nothing
+ default:
+ return nil, &ParseConfigError{ConnString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", tsa)}
+ }
+
+ return config, nil
+}
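+
+// A sketch of supplying the SSL key password via a callback instead of the
+// sslpassword parameter (connection string and environment variable are
+// hypothetical):
+//
+//	opts := ParseConfigOptions{
+//		GetSSLPassword: func(ctx context.Context) string {
+//			return os.Getenv("MYAPP_SSLKEY_PASSWORD") // e.g. fetched from a secret store
+//		},
+//	}
+//	config, err := ParseConfigWithOptions("host=pg.example.com sslcert=client.crt sslkey=client.key", opts)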
+
+func mergeSettings(settingSets ...map[string]string) map[string]string {
+ settings := make(map[string]string)
+
+ for _, s2 := range settingSets {
+ for k, v := range s2 {
+ settings[k] = v
+ }
+ }
+
+ return settings
+}
+
+func parseEnvSettings() map[string]string {
+ settings := make(map[string]string)
+
+ nameMap := map[string]string{
+ "PGHOST": "host",
+ "PGPORT": "port",
+ "PGDATABASE": "database",
+ "PGUSER": "user",
+ "PGPASSWORD": "password",
+ "PGPASSFILE": "passfile",
+ "PGAPPNAME": "application_name",
+ "PGCONNECT_TIMEOUT": "connect_timeout",
+ "PGSSLMODE": "sslmode",
+ "PGSSLKEY": "sslkey",
+ "PGSSLCERT": "sslcert",
+ "PGSSLSNI": "sslsni",
+ "PGSSLROOTCERT": "sslrootcert",
+ "PGSSLPASSWORD": "sslpassword",
+ "PGTARGETSESSIONATTRS": "target_session_attrs",
+ "PGSERVICE": "service",
+ "PGSERVICEFILE": "servicefile",
+ }
+
+ for envname, realname := range nameMap {
+ value := os.Getenv(envname)
+ if value != "" {
+ settings[realname] = value
+ }
+ }
+
+ return settings
+}
+
+func parseURLSettings(connString string) (map[string]string, error) {
+ settings := make(map[string]string)
+
+ url, err := url.Parse(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ if url.User != nil {
+ settings["user"] = url.User.Username()
+ if password, present := url.User.Password(); present {
+ settings["password"] = password
+ }
+ }
+
+ // Handle multiple host:port pairs in url.Host by splitting them into host,host,host and port,port,port.
+ var hosts []string
+ var ports []string
+ for _, host := range strings.Split(url.Host, ",") {
+ if host == "" {
+ continue
+ }
+ if isIPOnly(host) {
+ hosts = append(hosts, strings.Trim(host, "[]"))
+ continue
+ }
+ h, p, err := net.SplitHostPort(host)
+ if err != nil {
+ return nil, fmt.Errorf("failed to split host:port in '%s', err: %w", host, err)
+ }
+ if h != "" {
+ hosts = append(hosts, h)
+ }
+ if p != "" {
+ ports = append(ports, p)
+ }
+ }
+ if len(hosts) > 0 {
+ settings["host"] = strings.Join(hosts, ",")
+ }
+ if len(ports) > 0 {
+ settings["port"] = strings.Join(ports, ",")
+ }
+
+ database := strings.TrimLeft(url.Path, "/")
+ if database != "" {
+ settings["database"] = database
+ }
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ for k, v := range url.Query() {
+ if k2, present := nameMap[k]; present {
+ k = k2
+ }
+
+ settings[k] = v[0]
+ }
+
+ return settings, nil
+}
+
+func isIPOnly(host string) bool {
+ return net.ParseIP(strings.Trim(host, "[]")) != nil || !strings.Contains(host, ":")
+}
+
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
+func parseKeywordValueSettings(s string) (map[string]string, error) {
+ settings := make(map[string]string)
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ for len(s) > 0 {
+ var key, val string
+ eqIdx := strings.IndexRune(s, '=')
+ if eqIdx < 0 {
+ return nil, errors.New("invalid keyword/value")
+ }
+
+ key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
+ s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
+ if len(s) == 0 {
+ } else if s[0] != '\'' {
+ end := 0
+ for ; end < len(s); end++ {
+ if asciiSpace[s[end]] == 1 {
+ break
+ }
+ if s[end] == '\\' {
+ end++
+ if end == len(s) {
+ return nil, errors.New("invalid backslash")
+ }
+ }
+ }
+ val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ if end == len(s) {
+ s = ""
+ } else {
+ s = s[end+1:]
+ }
+ } else { // quoted string
+ s = s[1:]
+ end := 0
+ for ; end < len(s); end++ {
+ if s[end] == '\'' {
+ break
+ }
+ if s[end] == '\\' {
+ end++
+ }
+ }
+ if end == len(s) {
+ return nil, errors.New("unterminated quoted string in connection info string")
+ }
+ val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ if end == len(s) {
+ s = ""
+ } else {
+ s = s[end+1:]
+ }
+ }
+
+ if k, ok := nameMap[key]; ok {
+ key = k
+ }
+
+ if key == "" {
+ return nil, errors.New("invalid keyword/value")
+ }
+
+ settings[key] = val
+ }
+
+ return settings, nil
+}
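+
+// Illustration of the rules implemented above (inputs are made up):
+//
+//	parseKeywordValueSettings(`host=localhost dbname=mydb`)
+//	// => map[host:localhost database:mydb] (dbname is aliased to database)
+//
+//	parseKeywordValueSettings(`password='it\'s a secret'`)
+//	// => map[password:it's a secret] (quotes allow spaces; \' and \\ are unescaped)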
+
+func parseServiceSettings(servicefilePath, serviceName string) (map[string]string, error) {
+ servicefile, err := pgservicefile.ReadServicefile(servicefilePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read service file: %v", servicefilePath)
+ }
+
+ service, err := servicefile.GetService(serviceName)
+ if err != nil {
+ return nil, fmt.Errorf("unable to find service: %v", serviceName)
+ }
+
+ nameMap := map[string]string{
+ "dbname": "database",
+ }
+
+ settings := make(map[string]string, len(service.Settings))
+ for k, v := range service.Settings {
+ if k2, present := nameMap[k]; present {
+ k = k2
+ }
+ settings[k] = v
+ }
+
+ return settings, nil
+}
+
+// configTLS uses libpq's TLS parameters to construct []*tls.Config. It returns
+// a slice because sslmode "allow" and "prefer" fall back between TLS and
+// non-TLS connections, so more than one config may be needed.
+func configTLS(settings map[string]string, thisHost string, parseConfigOptions ParseConfigOptions) ([]*tls.Config, error) {
+ host := thisHost
+ sslmode := settings["sslmode"]
+ sslrootcert := settings["sslrootcert"]
+ sslcert := settings["sslcert"]
+ sslkey := settings["sslkey"]
+ sslpassword := settings["sslpassword"]
+ sslsni := settings["sslsni"]
+
+ // Match libpq default behavior
+ if sslmode == "" {
+ sslmode = "prefer"
+ }
+ if sslsni == "" {
+ sslsni = "1"
+ }
+
+ tlsConfig := &tls.Config{}
+
+ switch sslmode {
+ case "disable":
+ return []*tls.Config{nil}, nil
+ case "allow", "prefer":
+ tlsConfig.InsecureSkipVerify = true
+ case "require":
+ // According to PostgreSQL documentation, if a root CA file exists,
+ // the behavior of sslmode=require should be the same as that of verify-ca
+ //
+ // See https://www.postgresql.org/docs/12/libpq-ssl.html
+ if sslrootcert != "" {
+ goto nextCase
+ }
+ tlsConfig.InsecureSkipVerify = true
+ break
+ nextCase:
+ fallthrough
+ case "verify-ca":
+ // Don't perform the default certificate verification because it
+ // will verify the hostname. Instead, verify the server's
+ // certificate chain ourselves in VerifyPeerCertificate and
+ // ignore the server name. This emulates libpq's verify-ca
+ // behavior.
+ //
+ // See https://github.com/golang/go/issues/21971#issuecomment-332693931
+ // and https://pkg.go.dev/crypto/tls?tab=doc#example-Config-VerifyPeerCertificate
+ // for more info.
+ tlsConfig.InsecureSkipVerify = true
+ tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
+ certs := make([]*x509.Certificate, len(certificates))
+ for i, asn1Data := range certificates {
+ cert, err := x509.ParseCertificate(asn1Data)
+ if err != nil {
+ return errors.New("failed to parse certificate from server: " + err.Error())
+ }
+ certs[i] = cert
+ }
+
+ // Leave DNSName empty to skip hostname verification.
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ Intermediates: x509.NewCertPool(),
+ }
+ // Skip the first cert because it's the leaf. All others
+ // are intermediates.
+ for _, cert := range certs[1:] {
+ opts.Intermediates.AddCert(cert)
+ }
+ _, err := certs[0].Verify(opts)
+ return err
+ }
+ case "verify-full":
+ tlsConfig.ServerName = host
+ default:
+ return nil, errors.New("sslmode is invalid")
+ }
+
+ if sslrootcert != "" {
+ caCertPool := x509.NewCertPool()
+
+ caPath := sslrootcert
+ caCert, err := os.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CA file: %w", err)
+ }
+
+ if !caCertPool.AppendCertsFromPEM(caCert) {
+ return nil, errors.New("unable to add CA to cert pool")
+ }
+
+ tlsConfig.RootCAs = caCertPool
+ tlsConfig.ClientCAs = caCertPool
+ }
+
+ if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
+ return nil, errors.New(`both "sslcert" and "sslkey" are required`)
+ }
+
+ if sslcert != "" && sslkey != "" {
+ buf, err := os.ReadFile(sslkey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read sslkey: %w", err)
+ }
+ block, _ := pem.Decode(buf)
+ if block == nil {
+ return nil, errors.New("failed to decode sslkey")
+ }
+ var pemKey []byte
+ var decryptedKey []byte
+ var decryptedError error
+ // If PEM is encrypted, attempt to decrypt using pass phrase
+ if x509.IsEncryptedPEMBlock(block) {
+ // Attempt decryption with pass phrase
+ // NOTE: only supports RSA (PKCS#1)
+ if sslpassword != "" {
+ decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
+ }
+ // If sslpassword was not provided, or decryption with it failed,
+ // try to obtain the password from the GetSSLPassword callback.
+ if sslpassword == "" || decryptedError != nil {
+ if parseConfigOptions.GetSSLPassword != nil {
+ sslpassword = parseConfigOptions.GetSSLPassword(context.Background())
+ }
+ if sslpassword == "" {
+ return nil, fmt.Errorf("unable to find sslpassword")
+ }
+ }
+ decryptedKey, decryptedError = x509.DecryptPEMBlock(block, []byte(sslpassword))
+ // Should we also provide warning for PKCS#1 needed?
+ if decryptedError != nil {
+ return nil, fmt.Errorf("unable to decrypt key: %w", err)
+ }
+
+ pemBytes := pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: decryptedKey,
+ }
+ pemKey = pem.EncodeToMemory(&pemBytes)
+ } else {
+ pemKey = pem.EncodeToMemory(block)
+ }
+ certfile, err := os.ReadFile(sslcert)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read cert: %w", err)
+ }
+ cert, err := tls.X509KeyPair(certfile, pemKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load cert: %w", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ // Set Server Name Indication (SNI), if enabled by connection parameters.
+ // Per RFC 6066, do not set it if the host is a literal IP address (IPv4
+ // or IPv6).
+ if sslsni == "1" && net.ParseIP(host) == nil {
+ tlsConfig.ServerName = host
+ }
+
+ switch sslmode {
+ case "allow":
+ return []*tls.Config{nil, tlsConfig}, nil
+ case "prefer":
+ return []*tls.Config{tlsConfig, nil}, nil
+ case "require", "verify-ca", "verify-full":
+ return []*tls.Config{tlsConfig}, nil
+ default:
+ panic("BUG: bad sslmode should already have been caught")
+ }
+}
+
+func parsePort(s string) (uint16, error) {
+ port, err := strconv.ParseUint(s, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ if port < 1 || port > math.MaxUint16 {
+ return 0, errors.New("outside range")
+ }
+ return uint16(port), nil
+}
+
+func makeDefaultDialer() *net.Dialer {
+ // Rely on Go's default keep-alive settings.
+ return &net.Dialer{}
+}
+
+func makeDefaultResolver() *net.Resolver {
+ return net.DefaultResolver
+}
+
+func parseConnectTimeoutSetting(s string) (time.Duration, error) {
+ timeout, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ if timeout < 0 {
+ return 0, errors.New("negative timeout")
+ }
+ return time.Duration(timeout) * time.Second, nil
+}
+
+func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
+ d := makeDefaultDialer()
+ d.Timeout = timeout
+ return d.DialContext
+}
+
+// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=read-write.
+func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
+ result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
+ if result.Err != nil {
+ return result.Err
+ }
+
+ if string(result.Rows[0][0]) == "on" {
+ return errors.New("read only connection")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=read-only.
+func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
+ result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
+ if result.Err != nil {
+ return result.Err
+ }
+
+ if string(result.Rows[0][0]) != "on" {
+ return errors.New("connection is not read only")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=standby.
+func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
+ result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
+ if result.Err != nil {
+ return result.Err
+ }
+
+ if string(result.Rows[0][0]) != "t" {
+ return errors.New("server is not in hot standby mode")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=primary.
+func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
+ result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
+ if result.Err != nil {
+ return result.Err
+ }
+
+ if string(result.Rows[0][0]) == "t" {
+ return errors.New("server is in standby mode")
+ }
+
+ return nil
+}
+
+// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
+// target_session_attrs=prefer-standby.
+func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
+ result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
+ if result.Err != nil {
+ return result.Err
+ }
+
+ if string(result.Rows[0][0]) != "t" {
+ return &NotPreferredError{err: errors.New("server is not in hot standby mode")}
+ }
+
+ return nil
+}
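+
+// Sketch of selecting one of these validators by hand (ParseConfig normally
+// sets ValidateConnect from target_session_attrs; hosts are hypothetical):
+//
+//	config, err := ParseConfig("host=primary.example.com,replica.example.com")
+//	config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
+//	conn, err := ConnectConfig(context.Background(), config)
+//	// A host whose transaction_read_only is "on" is rejected and the next
+//	// fallback host is tried.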
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go
new file mode 100644
index 0000000..db8884e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/ctxwatch/context_watcher.go
@@ -0,0 +1,80 @@
+package ctxwatch
+
+import (
+ "context"
+ "sync"
+)
+
+// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
+// time.
+type ContextWatcher struct {
+ handler Handler
+ unwatchChan chan struct{}
+
+ lock sync.Mutex
+ watchInProgress bool
+ onCancelWasCalled bool
+}
+
+// NewContextWatcher returns a ContextWatcher. handler.HandleCancel will be called when a watched context is canceled.
+// handler.HandleUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been
+// canceled and HandleCancel called.
+func NewContextWatcher(handler Handler) *ContextWatcher {
+ cw := &ContextWatcher{
+ handler: handler,
+ unwatchChan: make(chan struct{}),
+ }
+
+ return cw
+}
+
+// Watch starts watching ctx. If ctx is canceled then the HandleCancel method of the handler passed to NewContextWatcher
+// will be called.
+func (cw *ContextWatcher) Watch(ctx context.Context) {
+ cw.lock.Lock()
+ defer cw.lock.Unlock()
+
+ if cw.watchInProgress {
+ panic("Watch already in progress")
+ }
+
+ cw.onCancelWasCalled = false
+
+ if ctx.Done() != nil {
+ cw.watchInProgress = true
+ go func() {
+ select {
+ case <-ctx.Done():
+ cw.handler.HandleCancel(ctx)
+ cw.onCancelWasCalled = true
+ <-cw.unwatchChan
+ case <-cw.unwatchChan:
+ }
+ }()
+ } else {
+ cw.watchInProgress = false
+ }
+}
+
+// Unwatch stops watching the previously watched context. If the handler's HandleCancel method was called then
+// HandleUnwatchAfterCancel will also be called.
+func (cw *ContextWatcher) Unwatch() {
+ cw.lock.Lock()
+ defer cw.lock.Unlock()
+
+ if cw.watchInProgress {
+ cw.unwatchChan <- struct{}{}
+ if cw.onCancelWasCalled {
+ cw.handler.HandleUnwatchAfterCancel()
+ }
+ cw.watchInProgress = false
+ }
+}
+
+type Handler interface {
+ // HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the
+ // context that was canceled.
+ HandleCancel(canceledCtx context.Context)
+
+ // HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched.
+ HandleUnwatchAfterCancel()
+}
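+
+// A minimal Handler sketch that closes a net.Conn on cancellation
+// (illustrative only; not part of this package):
+//
+//	type closeConnHandler struct{ conn net.Conn }
+//
+//	func (h *closeConnHandler) HandleCancel(ctx context.Context) {
+//		h.conn.Close() // interrupt any blocked read or write
+//	}
+//
+//	func (h *closeConnHandler) HandleUnwatchAfterCancel() {}
+//
+//	cw := ctxwatch.NewContextWatcher(&closeConnHandler{conn: conn})
+//	cw.Watch(ctx)
+//	defer cw.Unwatch()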
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go b/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go
new file mode 100644
index 0000000..1dd514f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/defaults.go
@@ -0,0 +1,63 @@
+//go:build !windows
+// +build !windows
+
+package pgconn
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+)
+
+func defaultSettings() map[string]string {
+ settings := make(map[string]string)
+
+ settings["host"] = defaultHost()
+ settings["port"] = "5432"
+
+ // Default to the OS user name. Purposely ignoring err getting user name from
+ // OS. The client application will simply have to specify the user in that
+ // case (which they typically will be doing anyway).
+ user, err := user.Current()
+ if err == nil {
+ settings["user"] = user.Username
+ settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass")
+ settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
+ sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
+ sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+ if _, err := os.Stat(sslcert); err == nil {
+ if _, err := os.Stat(sslkey); err == nil {
+ // Both the cert and key must be present to use them, or do not use either
+ settings["sslcert"] = sslcert
+ settings["sslkey"] = sslkey
+ }
+ }
+ sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt")
+ if _, err := os.Stat(sslrootcert); err == nil {
+ settings["sslrootcert"] = sslrootcert
+ }
+ }
+
+ settings["target_session_attrs"] = "any"
+
+ return settings
+}
+
+// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
+// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
+// checks the existence of common locations.
+func defaultHost() string {
+ candidatePaths := []string{
+ "/var/run/postgresql", // Debian
+ "/private/tmp", // OSX - homebrew
+ "/tmp", // standard PostgreSQL
+ }
+
+ for _, path := range candidatePaths {
+ if _, err := os.Stat(path); err == nil {
+ return path
+ }
+ }
+
+ return "localhost"
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go b/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go
new file mode 100644
index 0000000..33b4a1f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/defaults_windows.go
@@ -0,0 +1,57 @@
+package pgconn
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "strings"
+)
+
+func defaultSettings() map[string]string {
+ settings := make(map[string]string)
+
+ settings["host"] = defaultHost()
+ settings["port"] = "5432"
+
+ // Default to the OS user name. Purposely ignoring err getting user name from
+ // OS. The client application will simply have to specify the user in that
+ // case (which they typically will be doing anyway).
+ user, err := user.Current()
+ appData := os.Getenv("APPDATA")
+ if err == nil {
+ // Windows gives us the username here as `DOMAIN\user` or `LOCALPCNAME\user`,
+ // but the libpq default is just the `user` portion, so we strip off the first part.
+ username := user.Username
+ if strings.Contains(username, "\\") {
+ username = username[strings.LastIndex(username, "\\")+1:]
+ }
+
+ settings["user"] = username
+ settings["passfile"] = filepath.Join(appData, "postgresql", "pgpass.conf")
+ settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
+ sslcert := filepath.Join(appData, "postgresql", "postgresql.crt")
+ sslkey := filepath.Join(appData, "postgresql", "postgresql.key")
+ if _, err := os.Stat(sslcert); err == nil {
+ if _, err := os.Stat(sslkey); err == nil {
+ // Both the cert and key must be present to use them, or do not use either
+ settings["sslcert"] = sslcert
+ settings["sslkey"] = sslkey
+ }
+ }
+ sslrootcert := filepath.Join(appData, "postgresql", "root.crt")
+ if _, err := os.Stat(sslrootcert); err == nil {
+ settings["sslrootcert"] = sslrootcert
+ }
+ }
+
+ settings["target_session_attrs"] = "any"
+
+ return settings
+}
+
+// defaultHost attempts to mimic libpq's default host. libpq uses the default unix socket location on *nix and localhost
+// on Windows. The default socket location is compiled into libpq. Since pgx does not have access to that default it
+// checks the existence of common locations.
+func defaultHost() string {
+ return "localhost"
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/doc.go b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go
new file mode 100644
index 0000000..7013750
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/doc.go
@@ -0,0 +1,38 @@
+// Package pgconn is a low-level PostgreSQL database driver.
+/*
+pgconn provides lower level access to a PostgreSQL connection than a database/sql or pgx connection. It operates at
+nearly the same level as the C library libpq.
+
+Establishing a Connection
+
+Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the
+environment for libpq style environment variables.
+
+Executing a Query
+
+ExecParams and ExecPrepared execute a single query. They return readers that iterate over each row. The Read method
+reads all rows into memory.
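+
+A minimal sketch (hypothetical DSN; error handling elided):
+
+	conn, err := pgconn.Connect(context.Background(), "postgres://user:secret@localhost:5432/mydb")
+	defer conn.Close(context.Background())
+	result := conn.ExecParams(context.Background(), "select $1::text", [][]byte{[]byte("hello")}, nil, nil, nil).Read()
+	fmt.Println(string(result.Rows[0][0]), result.Err)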
+
+Executing Multiple Queries in a Single Round Trip
+
+Exec and ExecBatch can execute multiple queries in a single round trip. They return readers that iterate over each query
+result. The ReadAll method reads all query results into memory.
+
+Pipeline Mode
+
+Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of
+exactly how many and when network round trips occur.
+
+Context Support
+
+All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the
+method to immediately return. In most circumstances, this will also close the underlying connection. This behavior can
+be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior.
+This can be especially useful when queries are frequently canceled and the overhead of creating new connections is
+a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before
+interrupting the query in such a way as to close the connection.
+
+The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the
+client to abort.
+*/
+package pgconn
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/errors.go b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go
new file mode 100644
index 0000000..ec4a6d4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/errors.go
@@ -0,0 +1,248 @@
+package pgconn
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+)
+
+// SafeToRetry checks if err is guaranteed to have occurred before sending any data to the server.
+func SafeToRetry(err error) bool {
+ var retryableErr interface{ SafeToRetry() bool }
+ if errors.As(err, &retryableErr) {
+ return retryableErr.SafeToRetry()
+ }
+ return false
+}
+
+// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
+// context.DeadlineExceeded or an implementer of net.Error where Timeout() is true.
+func Timeout(err error) bool {
+ var timeoutErr *errTimeout
+ return errors.As(err, &timeoutErr)
+}
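+
+// A sketch of combining the two predicates in caller retry logic (doQuery is
+// a hypothetical helper):
+//
+//	err := doQuery(ctx, conn)
+//	if pgconn.Timeout(err) && pgconn.SafeToRetry(err) {
+//		// The request never reached the server, so retrying is safe even
+//		// for non-idempotent statements.
+//	}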
+
+// PgError represents an error reported by the PostgreSQL server. See
+// http://www.postgresql.org/docs/11/static/protocol-error-fields.html for
+// detailed field description.
+type PgError struct {
+ Severity string
+ SeverityUnlocalized string
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+}
+
+func (pe *PgError) Error() string {
+ return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
+}
+
+// SQLState returns the SQLState of the error.
+func (pe *PgError) SQLState() string {
+ return pe.Code
+}
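+
+// Typical caller-side inspection (sketch; SQLSTATE 23505 is unique_violation):
+//
+//	var pgErr *pgconn.PgError
+//	if errors.As(err, &pgErr) && pgErr.Code == "23505" {
+//		// handle duplicate key
+//	}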
+
+// ConnectError is the error returned when a connection attempt fails.
+type ConnectError struct {
+ Config *Config // The configuration that was used in the connection attempt.
+ err error
+}
+
+func (e *ConnectError) Error() string {
+ prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database)
+ details := e.err.Error()
+ if strings.Contains(details, "\n") {
+ return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t")
+ } else {
+ return prefix + " " + details
+ }
+}
+
+func (e *ConnectError) Unwrap() error {
+ return e.err
+}
+
+type perDialConnectError struct {
+ address string
+ originalHostname string
+ err error
+}
+
+func (e *perDialConnectError) Error() string {
+ return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error())
+}
+
+func (e *perDialConnectError) Unwrap() error {
+ return e.err
+}
+
+type connLockError struct {
+ status string
+}
+
+func (e *connLockError) SafeToRetry() bool {
+ return true // a lock failure by definition happens before the connection is used.
+}
+
+func (e *connLockError) Error() string {
+ return e.status
+}
+
+// ParseConfigError is the error returned when a connection string cannot be parsed.
+type ParseConfigError struct {
+ ConnString string // The connection string that could not be parsed.
+ msg string
+ err error
+}
+
+func (e *ParseConfigError) Error() string {
+ // Now that ParseConfigError is public and ConnString is available to the developer, perhaps it would be better to only
+ // return a static string. That would ensure that the error message cannot leak a password. The ConnString field would
+ // allow access to the original string if desired and Unwrap would allow access to the underlying error.
+ connString := redactPW(e.ConnString)
+ if e.err == nil {
+ return fmt.Sprintf("cannot parse `%s`: %s", connString, e.msg)
+ }
+ return fmt.Sprintf("cannot parse `%s`: %s (%s)", connString, e.msg, e.err.Error())
+}
+
+func (e *ParseConfigError) Unwrap() error {
+ return e.err
+}
+
+func normalizeTimeoutError(ctx context.Context, err error) error {
+ var netErr net.Error
+ if errors.As(err, &netErr) && netErr.Timeout() {
+ if ctx.Err() == context.Canceled {
+ // Since the timeout was caused by a context cancellation, the actual error is context.Canceled not the timeout error.
+ return context.Canceled
+ } else if ctx.Err() == context.DeadlineExceeded {
+ return &errTimeout{err: ctx.Err()}
+ } else {
+ return &errTimeout{err: netErr}
+ }
+ }
+ return err
+}
+
+type pgconnError struct {
+ msg string
+ err error
+ safeToRetry bool
+}
+
+func (e *pgconnError) Error() string {
+ if e.msg == "" {
+ return e.err.Error()
+ }
+ if e.err == nil {
+ return e.msg
+ }
+ return fmt.Sprintf("%s: %s", e.msg, e.err.Error())
+}
+
+func (e *pgconnError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *pgconnError) Unwrap() error {
+ return e.err
+}
+
+// errTimeout occurs when an error was caused by a timeout. Specifically, it wraps an error which is
+// context.Canceled, context.DeadlineExceeded, or an implementer of net.Error where Timeout() is true.
+type errTimeout struct {
+ err error
+}
+
+func (e *errTimeout) Error() string {
+ return fmt.Sprintf("timeout: %s", e.err.Error())
+}
+
+func (e *errTimeout) SafeToRetry() bool {
+ return SafeToRetry(e.err)
+}
+
+func (e *errTimeout) Unwrap() error {
+ return e.err
+}
+
+type contextAlreadyDoneError struct {
+ err error
+}
+
+func (e *contextAlreadyDoneError) Error() string {
+ return fmt.Sprintf("context already done: %s", e.err.Error())
+}
+
+func (e *contextAlreadyDoneError) SafeToRetry() bool {
+ return true
+}
+
+func (e *contextAlreadyDoneError) Unwrap() error {
+ return e.err
+}
+
+// newContextAlreadyDoneError double-wraps a context error in `contextAlreadyDoneError` and `errTimeout`.
+func newContextAlreadyDoneError(ctx context.Context) (err error) {
+ return &errTimeout{&contextAlreadyDoneError{err: ctx.Err()}}
+}
+
+func redactPW(connString string) string {
+ if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
+ if u, err := url.Parse(connString); err == nil {
+ return redactURL(u)
+ }
+ }
+ quotedKV := regexp.MustCompile(`password='[^']*'`)
+ connString = quotedKV.ReplaceAllLiteralString(connString, "password=xxxxx")
+ plainKV := regexp.MustCompile(`password=[^ ]*`)
+ connString = plainKV.ReplaceAllLiteralString(connString, "password=xxxxx")
+ brokenURL := regexp.MustCompile(`:[^:@]+?@`)
+ connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@")
+ return connString
+}
+
+func redactURL(u *url.URL) string {
+ if u == nil {
+ return ""
+ }
+ if _, pwSet := u.User.Password(); pwSet {
+ u.User = url.UserPassword(u.User.Username(), "xxxxx")
+ }
+ return u.String()
+}
+
+type NotPreferredError struct {
+ err error
+ safeToRetry bool
+}
+
+func (e *NotPreferredError) Error() string {
+ return fmt.Sprintf("standby server not found: %s", e.err.Error())
+}
+
+func (e *NotPreferredError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *NotPreferredError) Unwrap() error {
+ return e.err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go b/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go
new file mode 100644
index 0000000..e65c2c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go
@@ -0,0 +1,139 @@
+// Package bgreader provides an io.Reader that can optionally buffer reads in the background.
+package bgreader
+
+import (
+ "io"
+ "sync"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+)
+
+const (
+ StatusStopped = iota
+ StatusRunning
+ StatusStopping
+)
+
+// BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
+type BGReader struct {
+ r io.Reader
+
+ cond *sync.Cond
+ status int32
+ readResults []readResult
+}
+
+type readResult struct {
+ buf *[]byte
+ err error
+}
+
+// Start starts the background reader. If the background reader is already running this is a no-op. The background
+// reader will stop automatically when the underlying reader returns an error.
+func (r *BGReader) Start() {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ switch r.status {
+ case StatusStopped:
+ r.status = StatusRunning
+ go r.bgRead()
+ case StatusRunning:
+ // no-op
+ case StatusStopping:
+ r.status = StatusRunning
+ }
+}
+
+// Stop tells the background reader to stop after the in progress Read returns. It is safe to call Stop when the
+// background reader is not running.
+func (r *BGReader) Stop() {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ switch r.status {
+ case StatusStopped:
+ // no-op
+ case StatusRunning:
+ r.status = StatusStopping
+ case StatusStopping:
+ // no-op
+ }
+}
+
+// Status returns the current status of the background reader.
+func (r *BGReader) Status() int32 {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+ return r.status
+}
+
+func (r *BGReader) bgRead() {
+ keepReading := true
+ for keepReading {
+ buf := iobufpool.Get(8192)
+ n, err := r.r.Read(*buf)
+ *buf = (*buf)[:n]
+
+ r.cond.L.Lock()
+ r.readResults = append(r.readResults, readResult{buf: buf, err: err})
+ if r.status == StatusStopping || err != nil {
+ r.status = StatusStopped
+ keepReading = false
+ }
+ r.cond.L.Unlock()
+ r.cond.Broadcast()
+ }
+}
+
+// Read implements the io.Reader interface.
+func (r *BGReader) Read(p []byte) (int, error) {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ if len(r.readResults) > 0 {
+ return r.readFromReadResults(p)
+ }
+
+ // There are no unread background read results and the background reader is stopped.
+ if r.status == StatusStopped {
+ return r.r.Read(p)
+ }
+
+ // Wait for results from the background reader
+ for len(r.readResults) == 0 {
+ r.cond.Wait()
+ }
+ return r.readFromReadResults(p)
+}
+
+// readFromReadResults reads a result previously read by the background reader. r.cond.L must be held.
+func (r *BGReader) readFromReadResults(p []byte) (int, error) {
+ buf := r.readResults[0].buf
+ var err error
+
+ n := copy(p, *buf)
+ if n == len(*buf) {
+ err = r.readResults[0].err
+ iobufpool.Put(buf)
+ if len(r.readResults) == 1 {
+ r.readResults = nil
+ } else {
+ r.readResults = r.readResults[1:]
+ }
+ } else {
+ *buf = (*buf)[n:]
+ r.readResults[0].buf = buf
+ }
+
+ return n, err
+}
+
+func New(r io.Reader) *BGReader {
+ return &BGReader{
+ r: r,
+ cond: &sync.Cond{
+ L: &sync.Mutex{},
+ },
+ }
+}
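+
+// Usage sketch (illustrative): buffer reads only while a potentially blocking
+// write is in flight, then drain through the same Read method.
+//
+//	br := bgreader.New(conn)
+//	br.Start()             // background goroutine reads conn into a queue
+//	// ... perform a write that could otherwise deadlock ...
+//	br.Stop()              // stop after the in-progress read returns
+//	n, err := br.Read(buf) // served from queued results first, then conn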
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go b/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go
new file mode 100644
index 0000000..3c1af34
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/krb5.go
@@ -0,0 +1,100 @@
+package pgconn
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+// NewGSSFunc creates a GSS authentication provider, for use with
+// RegisterGSSProvider.
+type NewGSSFunc func() (GSS, error)
+
+var newGSS NewGSSFunc
+
+// RegisterGSSProvider registers a GSS authentication provider. For example, if
+// you need to use Kerberos to authenticate with your server, add this to your
+// main package:
+//
+// import "github.com/otan/gopgkrb5"
+//
+// func init() {
+// pgconn.RegisterGSSProvider(func() (pgconn.GSS, error) { return gopgkrb5.NewGSS() })
+// }
+func RegisterGSSProvider(newGSSArg NewGSSFunc) {
+ newGSS = newGSSArg
+}
+
+// GSS provides GSSAPI authentication (e.g., Kerberos).
+type GSS interface {
+ GetInitToken(host string, service string) ([]byte, error)
+ GetInitTokenFromSPN(spn string) ([]byte, error)
+ Continue(inToken []byte) (done bool, outToken []byte, err error)
+}
+
+func (c *PgConn) gssAuth() error {
+ if newGSS == nil {
+ return errors.New("kerberos error: no GSSAPI provider registered, see https://github.com/otan/gopgkrb5")
+ }
+ cli, err := newGSS()
+ if err != nil {
+ return err
+ }
+
+ var nextData []byte
+ if c.config.KerberosSpn != "" {
+ // Use the supplied SPN if provided.
+ nextData, err = cli.GetInitTokenFromSPN(c.config.KerberosSpn)
+ } else {
+ // Allow the kerberos service name to be overridden
+ service := "postgres"
+ if c.config.KerberosSrvName != "" {
+ service = c.config.KerberosSrvName
+ }
+ nextData, err = cli.GetInitToken(c.config.Host, service)
+ }
+ if err != nil {
+ return err
+ }
+
+ for {
+ gssResponse := &pgproto3.GSSResponse{
+ Data: nextData,
+ }
+ c.frontend.Send(gssResponse)
+ err = c.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ return err
+ }
+ resp, err := c.rxGSSContinue()
+ if err != nil {
+ return err
+ }
+ var done bool
+ done, nextData, err = cli.Continue(resp.Data)
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ }
+ return nil
+}
+
+func (c *PgConn) rxGSSContinue() (*pgproto3.AuthenticationGSSContinue, error) {
+ msg, err := c.receiveMessage()
+ if err != nil {
+ return nil, err
+ }
+
+ switch m := msg.(type) {
+ case *pgproto3.AuthenticationGSSContinue:
+ return m, nil
+ case *pgproto3.ErrorResponse:
+ return nil, ErrorResponseToPgError(m)
+ }
+
+ return nil, fmt.Errorf("expected AuthenticationGSSContinue message but received unexpected message %T", msg)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go
new file mode 100644
index 0000000..7efb522
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go
@@ -0,0 +1,2346 @@
+package pgconn
+
+import (
+ "context"
+ "crypto/md5"
+ "crypto/tls"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgconn/ctxwatch"
+ "github.com/jackc/pgx/v5/pgconn/internal/bgreader"
+ "github.com/jackc/pgx/v5/pgproto3"
+)
+
+const (
+ connStatusUninitialized = iota
+ connStatusConnecting
+ connStatusClosed
+ connStatusIdle
+ connStatusBusy
+)
+
+// Notice represents a notice response message reported by the PostgreSQL server. Be aware that this is distinct from
+// LISTEN/NOTIFY notification.
+type Notice PgError
+
+// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system
+type Notification struct {
+ PID uint32 // backend pid that sent the notification
+ Channel string // channel from which notification was received
+ Payload string
+}
+
+// DialFunc is a function that can be used to connect to a PostgreSQL server.
+type DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
+
+// LookupFunc is a function that can be used to look up IP addresses for a host. Optionally an ip:port combination can
+// be returned in order to override the connection string's port.
+type LookupFunc func(ctx context.Context, host string) (addrs []string, err error)
+
+// BuildFrontendFunc is a function that can be used to create Frontend implementation for connection.
+type BuildFrontendFunc func(r io.Reader, w io.Writer) *pgproto3.Frontend
+
+// PgErrorHandler is a function that handles errors returned from Postgres. This function must return true to keep
+// the connection open. Returning false will cause the connection to be closed immediately. You should return
+// false on any FATAL-severity errors. This will not receive network errors. The *PgConn is provided so the handler is
+// aware of the origin of the error, but it must not invoke any query method.
+type PgErrorHandler func(*PgConn, *PgError) bool
+
+// NoticeHandler is a function that can handle notices received from the PostgreSQL server. Notices can be received at
+// any time, usually during handling of a query response. The *PgConn is provided so the handler is aware of the origin
+// of the notice, but it must not invoke any query method. Be aware that this is distinct from LISTEN/NOTIFY
+// notification.
+type NoticeHandler func(*PgConn, *Notice)
+
+// NotificationHandler is a function that can handle notifications received from the PostgreSQL server. Notifications
+// can be received at any time, usually during handling of a query response. The *PgConn is provided so the handler is
+// aware of the origin of the notice, but it must not invoke any query method. Be aware that this is distinct from a
+// notice event.
+type NotificationHandler func(*PgConn, *Notification)
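+
+// Sketch of installing a notification handler before connecting (assumes the
+// OnNotification field defined on Config; channel name is an example):
+//
+//	config, err := pgconn.ParseConfig("")
+//	config.OnNotification = func(c *pgconn.PgConn, n *pgconn.Notification) {
+//		log.Printf("NOTIFY on %q: %s", n.Channel, n.Payload)
+//	}
+//	conn, err := pgconn.ConnectConfig(context.Background(), config)
+//	err = conn.Exec(context.Background(), "listen mychannel").Close()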
+
+// PgConn is a low-level PostgreSQL connection handle. It is not safe for concurrent usage.
+type PgConn struct {
+ conn net.Conn
+ pid uint32 // backend pid
+ secretKey uint32 // key to use to send a cancel query message to the server
+ parameterStatuses map[string]string // parameters that have been reported by the server
+ txStatus byte
+ frontend *pgproto3.Frontend
+ bgReader *bgreader.BGReader
+ slowWriteTimer *time.Timer
+ bgReaderStarted chan struct{}
+
+ customData map[string]any
+
+ config *Config
+
+ status byte // One of connStatus* constants
+
+ bufferingReceive bool
+ bufferingReceiveMux sync.Mutex
+ bufferingReceiveMsg pgproto3.BackendMessage
+ bufferingReceiveErr error
+
+ peekedMsg pgproto3.BackendMessage
+
+ // Reusable / preallocated resources
+ resultReader ResultReader
+ multiResultReader MultiResultReader
+ pipeline Pipeline
+ contextWatcher *ctxwatch.ContextWatcher
+ fieldDescriptions [16]FieldDescription
+
+ cleanupDone chan struct{}
+}
+
+// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value
+// format) to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a
+// connect attempt.
+func Connect(ctx context.Context, connString string) (*PgConn, error) {
+ config, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConnectConfig(ctx, config)
+}
+
+// ConnectWithOptions establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value
+// format) and ParseConfigOptions to provide additional configuration. See documentation for [ParseConfig] for details.
+// ctx can be used to cancel a connect attempt.
+func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptions ParseConfigOptions) (*PgConn, error) {
+ config, err := ParseConfigWithOptions(connString, parseConfigOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ return ConnectConfig(ctx, config)
+}
+
+// ConnectConfig establishes a connection to a PostgreSQL server using config. config must have been constructed with
+// [ParseConfig]. ctx can be used to cancel a connect attempt.
+//
+// If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An
+// authentication error will terminate the chain of attempts (like libpq:
+// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error.
+func ConnectConfig(ctx context.Context, config *Config) (*PgConn, error) {
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ var allErrors []error
+
+ connectConfigs, errs := buildConnectOneConfigs(ctx, config)
+ if len(errs) > 0 {
+ allErrors = append(allErrors, errs...)
+ }
+
+ if len(connectConfigs) == 0 {
+ return nil, &ConnectError{Config: config, err: fmt.Errorf("hostname resolving error: %w", errors.Join(allErrors...))}
+ }
+
+ pgConn, errs := connectPreferred(ctx, config, connectConfigs)
+ if len(errs) > 0 {
+ allErrors = append(allErrors, errs...)
+ return nil, &ConnectError{Config: config, err: errors.Join(allErrors...)}
+ }
+
+ if config.AfterConnect != nil {
+ err := config.AfterConnect(ctx, pgConn)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, &ConnectError{Config: config, err: fmt.Errorf("AfterConnect error: %w", err)}
+ }
+ }
+
+ return pgConn, nil
+}
+
+// buildConnectOneConfigs resolves hostnames and builds a list of connectOneConfigs to try connecting to. It returns a
+// slice of successfully resolved connectOneConfigs and a slice of errors. It is possible for both slices to contain
+// values if some hosts were successfully resolved and others were not.
+func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneConfig, []error) {
+ // Simplify usage by treating primary config and fallbacks the same.
+ fallbackConfigs := []*FallbackConfig{
+ {
+ Host: config.Host,
+ Port: config.Port,
+ TLSConfig: config.TLSConfig,
+ },
+ }
+ fallbackConfigs = append(fallbackConfigs, config.Fallbacks...)
+
+ var configs []*connectOneConfig
+
+ var allErrors []error
+
+ for _, fb := range fallbackConfigs {
+ // skip resolve for unix sockets
+ if isAbsolutePath(fb.Host) {
+ network, address := NetworkAddress(fb.Host, fb.Port)
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+
+ continue
+ }
+
+ ips, err := config.LookupFunc(ctx, fb.Host)
+ if err != nil {
+ allErrors = append(allErrors, err)
+ continue
+ }
+
+ for _, ip := range ips {
+ splitIP, splitPort, err := net.SplitHostPort(ip)
+ if err == nil {
+ port, err := strconv.ParseUint(splitPort, 10, 16)
+ if err != nil {
+ return nil, []error{fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)}
+ }
+ network, address := NetworkAddress(splitIP, uint16(port))
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+ } else {
+ network, address := NetworkAddress(ip, fb.Port)
+ configs = append(configs, &connectOneConfig{
+ network: network,
+ address: address,
+ originalHostname: fb.Host,
+ tlsConfig: fb.TLSConfig,
+ })
+ }
+ }
+ }
+
+ return configs, allErrors
+}
+
+// connectPreferred attempts to connect to the preferred host from connectOneConfigs. The connections are attempted in
+// order. If a connection is successful it is returned. If no connection is successful then all errors are returned. If
+// a connection attempt returns a [NotPreferredError], then that host will be used if no other hosts are successful.
+func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*connectOneConfig) (*PgConn, []error) {
+ octx := ctx
+ var allErrors []error
+
+ var fallbackConnectOneConfig *connectOneConfig
+ for i, c := range connectOneConfigs {
+ // ConnectTimeout restricts the whole connection process.
+ if config.ConnectTimeout != 0 {
+ // create new context first time or when previous host was different
+ if i == 0 || (connectOneConfigs[i].address != connectOneConfigs[i-1].address) {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout)
+ defer cancel()
+ }
+ } else {
+ ctx = octx
+ }
+
+ pgConn, err := connectOne(ctx, config, c, false)
+ if pgConn != nil {
+ return pgConn, nil
+ }
+
+ allErrors = append(allErrors, err)
+
+ var pgErr *PgError
+ if errors.As(err, &pgErr) {
+ const ERRCODE_INVALID_PASSWORD = "28P01" // wrong password
+ const ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION = "28000" // wrong password or bad pg_hba.conf settings
+ const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
+ const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
+ if pgErr.Code == ERRCODE_INVALID_PASSWORD ||
+ pgErr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && c.tlsConfig != nil ||
+ pgErr.Code == ERRCODE_INVALID_CATALOG_NAME ||
+ pgErr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE {
+ return nil, allErrors
+ }
+ }
+
+ var npErr *NotPreferredError
+ if errors.As(err, &npErr) {
+ fallbackConnectOneConfig = c
+ }
+ }
+
+ if fallbackConnectOneConfig != nil {
+ pgConn, err := connectOne(ctx, config, fallbackConnectOneConfig, true)
+ if err == nil {
+ return pgConn, nil
+ }
+ allErrors = append(allErrors, err)
+ }
+
+ return nil, allErrors
+}
+
+// connectOne makes one connection attempt to a single host.
+func connectOne(ctx context.Context, config *Config, connectConfig *connectOneConfig,
+ ignoreNotPreferredErr bool,
+) (*PgConn, error) {
+ pgConn := new(PgConn)
+ pgConn.config = config
+ pgConn.cleanupDone = make(chan struct{})
+ pgConn.customData = make(map[string]any)
+
+ var err error
+
+ newPerDialConnectError := func(msg string, err error) *perDialConnectError {
+ err = normalizeTimeoutError(ctx, err)
+ e := &perDialConnectError{address: connectConfig.address, originalHostname: connectConfig.originalHostname, err: fmt.Errorf("%s: %w", msg, err)}
+ return e
+ }
+
+ pgConn.conn, err = config.DialFunc(ctx, connectConfig.network, connectConfig.address)
+ if err != nil {
+ return nil, newPerDialConnectError("dial error", err)
+ }
+
+ if connectConfig.tlsConfig != nil {
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: pgConn.conn})
+ pgConn.contextWatcher.Watch(ctx)
+ tlsConn, err := startTLS(pgConn.conn, connectConfig.tlsConfig)
+ pgConn.contextWatcher.Unwatch() // Always unwatch the raw connection after the TLS handshake.
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("tls error", err)
+ }
+
+ pgConn.conn = tlsConn
+ }
+
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(config.BuildContextWatcherHandler(pgConn))
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+
+ pgConn.parameterStatuses = make(map[string]string)
+ pgConn.status = connStatusConnecting
+ pgConn.bgReader = bgreader.New(pgConn.conn)
+ pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
+ func() {
+ pgConn.bgReader.Start()
+ pgConn.bgReaderStarted <- struct{}{}
+ },
+ )
+ pgConn.slowWriteTimer.Stop()
+ pgConn.bgReaderStarted = make(chan struct{})
+ pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
+
+ startupMsg := pgproto3.StartupMessage{
+ ProtocolVersion: pgproto3.ProtocolVersionNumber,
+ Parameters: make(map[string]string),
+ }
+
+ // Copy default run-time params
+ for k, v := range config.RuntimeParams {
+ startupMsg.Parameters[k] = v
+ }
+
+ startupMsg.Parameters["user"] = config.User
+ if config.Database != "" {
+ startupMsg.Parameters["database"] = config.Database
+ }
+
+ pgConn.frontend.Send(&startupMsg)
+ if err := pgConn.flushWithPotentialWriteReadDeadlock(); err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write startup message", err)
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.conn.Close()
+ if err, ok := err.(*PgError); ok {
+ return nil, newPerDialConnectError("server error", err)
+ }
+ return nil, newPerDialConnectError("failed to receive message", err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.BackendKeyData:
+ pgConn.pid = msg.ProcessID
+ pgConn.secretKey = msg.SecretKey
+
+ case *pgproto3.AuthenticationOk:
+ case *pgproto3.AuthenticationCleartextPassword:
+ err = pgConn.txPasswordMessage(pgConn.config.Password)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write password message", err)
+ }
+ case *pgproto3.AuthenticationMD5Password:
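+ // PostgreSQL MD5 auth: the digest is "md5" + md5hex(md5hex(password + username) + salt).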
+ digestedPassword := "md5" + hexMD5(hexMD5(pgConn.config.Password+pgConn.config.User)+string(msg.Salt[:]))
+ err = pgConn.txPasswordMessage(digestedPassword)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed to write password message", err)
+ }
+ case *pgproto3.AuthenticationSASL:
+ err = pgConn.scramAuth(msg.AuthMechanisms)
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed SASL auth", err)
+ }
+ case *pgproto3.AuthenticationGSS:
+ err = pgConn.gssAuth()
+ if err != nil {
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("failed GSS auth", err)
+ }
+ case *pgproto3.ReadyForQuery:
+ pgConn.status = connStatusIdle
+ if config.ValidateConnect != nil {
+ // ValidateConnect may execute commands that cause the context to be watched again. Unwatch first to avoid
+ // the watch already in progress panic. This is the last thing done by this method so there is no need to
+ // restart the watch after ValidateConnect returns.
+ //
+ // See https://github.com/jackc/pgconn/issues/40.
+ pgConn.contextWatcher.Unwatch()
+
+ err := config.ValidateConnect(ctx, pgConn)
+ if err != nil {
+ if _, ok := err.(*NotPreferredError); ignoreNotPreferredErr && ok {
+ return pgConn, nil
+ }
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("ValidateConnect failed", err)
+ }
+ }
+ return pgConn, nil
+ case *pgproto3.ParameterStatus, *pgproto3.NoticeResponse:
+ // handled by receiveMessage
+ case *pgproto3.ErrorResponse:
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("server error", ErrorResponseToPgError(msg))
+ default:
+ pgConn.conn.Close()
+ return nil, newPerDialConnectError("received unexpected message", err)
+ }
+ }
+}
+
+func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
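+ // Send an SSLRequest message: a 4-byte length (8) followed by the request code
+ // 80877103 (1234 << 16 | 5679), as defined by the PostgreSQL wire protocol.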
+ err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103})
+ if err != nil {
+ return nil, err
+ }
+
+ response := make([]byte, 1)
+ if _, err = io.ReadFull(conn, response); err != nil {
+ return nil, err
+ }
+
+ if response[0] != 'S' {
+ return nil, errors.New("server refused TLS connection")
+ }
+
+ return tls.Client(conn, tlsConfig), nil
+}
+
+func (pgConn *PgConn) txPasswordMessage(password string) (err error) {
+ pgConn.frontend.Send(&pgproto3.PasswordMessage{Password: password})
+ return pgConn.flushWithPotentialWriteReadDeadlock()
+}
+
+func hexMD5(s string) string {
+ hash := md5.New()
+ io.WriteString(hash, s)
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
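+// signalMessage starts a single background receive on the frontend and returns a channel that is closed once a
+// message (or error) has been stored in the buffering fields.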
+func (pgConn *PgConn) signalMessage() chan struct{} {
+ if pgConn.bufferingReceive {
+ panic("BUG: signalMessage when already in progress")
+ }
+
+ pgConn.bufferingReceive = true
+ pgConn.bufferingReceiveMux.Lock()
+
+ ch := make(chan struct{})
+ go func() {
+ pgConn.bufferingReceiveMsg, pgConn.bufferingReceiveErr = pgConn.frontend.Receive()
+ pgConn.bufferingReceiveMux.Unlock()
+ close(ch)
+ }()
+
+ return ch
+}
+
+// ReceiveMessage receives one wire protocol message from the PostgreSQL server. It must only be used when the
+// connection is not busy. For example, it is an error to call ReceiveMessage while reading the result of a query. The
+// messages
+// are still handled by the core pgconn message handling system so receiving a NotificationResponse will still trigger
+// the OnNotification callback.
+//
+// This is a very low level method that requires deep understanding of the PostgreSQL wire protocol to use correctly.
+// See https://www.postgresql.org/docs/current/protocol.html.
+func (pgConn *PgConn) ReceiveMessage(ctx context.Context) (pgproto3.BackendMessage, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return nil, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ err = &pgconnError{
+ msg: "receive message failed",
+ err: normalizeTimeoutError(ctx, err),
+ safeToRetry: true,
+ }
+ }
+ return msg, err
+}
+
+// peekMessage peeks at the next message without setting up context cancellation.
+func (pgConn *PgConn) peekMessage() (pgproto3.BackendMessage, error) {
+ if pgConn.peekedMsg != nil {
+ return pgConn.peekedMsg, nil
+ }
+
+ var msg pgproto3.BackendMessage
+ var err error
+ if pgConn.bufferingReceive {
+ pgConn.bufferingReceiveMux.Lock()
+ msg = pgConn.bufferingReceiveMsg
+ err = pgConn.bufferingReceiveErr
+ pgConn.bufferingReceiveMux.Unlock()
+ pgConn.bufferingReceive = false
+
+ // If a timeout error happened in the background try the read again.
+ var netErr net.Error
+ if errors.As(err, &netErr) && netErr.Timeout() {
+ msg, err = pgConn.frontend.Receive()
+ }
+ } else {
+ msg, err = pgConn.frontend.Receive()
+ }
+
+ if err != nil {
+ // Close on anything other than timeout error - everything else is fatal
+ var netErr net.Error
+ isNetErr := errors.As(err, &netErr)
+ if !(isNetErr && netErr.Timeout()) {
+ pgConn.asyncClose()
+ }
+
+ return nil, err
+ }
+
+ pgConn.peekedMsg = msg
+ return msg, nil
+}
+
+// receiveMessage receives a message without setting up context cancellation.
+func (pgConn *PgConn) receiveMessage() (pgproto3.BackendMessage, error) {
+ msg, err := pgConn.peekMessage()
+ if err != nil {
+ return nil, err
+ }
+ pgConn.peekedMsg = nil
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ pgConn.txStatus = msg.TxStatus
+ case *pgproto3.ParameterStatus:
+ pgConn.parameterStatuses[msg.Name] = msg.Value
+ case *pgproto3.ErrorResponse:
+ err := ErrorResponseToPgError(msg)
+ if pgConn.config.OnPgError != nil && !pgConn.config.OnPgError(pgConn, err) {
+ pgConn.status = connStatusClosed
+ pgConn.conn.Close() // Ignore error as the connection is already broken and there is already an error to return.
+ close(pgConn.cleanupDone)
+ return nil, err
+ }
+ case *pgproto3.NoticeResponse:
+ if pgConn.config.OnNotice != nil {
+ pgConn.config.OnNotice(pgConn, noticeResponseToNotice(msg))
+ }
+ case *pgproto3.NotificationResponse:
+ if pgConn.config.OnNotification != nil {
+ pgConn.config.OnNotification(pgConn, &Notification{PID: msg.PID, Channel: msg.Channel, Payload: msg.Payload})
+ }
+ }
+
+ return msg, nil
+}
+
+// Conn returns the underlying net.Conn. This is rarely necessary. If the connection will be directly used for reading or
+// writing then SyncConn should usually be called before Conn.
+func (pgConn *PgConn) Conn() net.Conn {
+ return pgConn.conn
+}
+
+// PID returns the backend PID.
+func (pgConn *PgConn) PID() uint32 {
+ return pgConn.pid
+}
+
+// TxStatus returns the current TxStatus as reported by the server in the ReadyForQuery message.
+//
+// Possible return values:
+//
+// 'I' - idle / not in transaction
+// 'T' - in a transaction
+// 'E' - in a failed transaction
+//
+// See https://www.postgresql.org/docs/current/protocol-message-formats.html.
+func (pgConn *PgConn) TxStatus() byte {
+ return pgConn.txStatus
+}
+
+// SecretKey returns the backend secret key used to send a cancel query message to the server.
+func (pgConn *PgConn) SecretKey() uint32 {
+ return pgConn.secretKey
+}
+
+// Frontend returns the underlying *pgproto3.Frontend. This is rarely necessary.
+func (pgConn *PgConn) Frontend() *pgproto3.Frontend {
+ return pgConn.frontend
+}
+
+// Close closes a connection. It is safe to call Close on an already closed connection. Close attempts a clean close by
+// sending the exit message to PostgreSQL. However, this could block so ctx is available to limit the time to wait. The
+// underlying net.Conn.Close() will always be called regardless of any other errors.
+func (pgConn *PgConn) Close(ctx context.Context) error {
+ if pgConn.status == connStatusClosed {
+ return nil
+ }
+ pgConn.status = connStatusClosed
+
+ defer close(pgConn.cleanupDone)
+ defer pgConn.conn.Close()
+
+ if ctx != context.Background() {
+ // Close may be called while a cancellable query is in progress. This will most often be triggered by panic when
+ // a defer closes the connection (possibly indirectly via a transaction or a connection pool). Unwatch to end any
+ // previous watch. It is safe to Unwatch regardless of whether a watch is already in progress.
+ //
+ // See https://github.com/jackc/pgconn/issues/29
+ pgConn.contextWatcher.Unwatch()
+
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Ignore any errors sending Terminate message and waiting for server to close connection.
+ // This mimics the behavior of libpq PQfinish. It calls closePGconn which calls sendTerminateConn which purposefully
+ // ignores errors.
+ //
+ // See https://github.com/jackc/pgx/issues/637
+ pgConn.frontend.Send(&pgproto3.Terminate{})
+ pgConn.flushWithPotentialWriteReadDeadlock()
+
+ return pgConn.conn.Close()
+}
+
+// asyncClose marks the connection as closed and asynchronously sends a cancel query message and closes the underlying
+// connection.
+func (pgConn *PgConn) asyncClose() {
+ if pgConn.status == connStatusClosed {
+ return
+ }
+ pgConn.status = connStatusClosed
+
+ go func() {
+ defer close(pgConn.cleanupDone)
+ defer pgConn.conn.Close()
+
+ deadline := time.Now().Add(time.Second * 15)
+
+ ctx, cancel := context.WithDeadline(context.Background(), deadline)
+ defer cancel()
+
+ pgConn.CancelRequest(ctx)
+
+ pgConn.conn.SetDeadline(deadline)
+
+ pgConn.frontend.Send(&pgproto3.Terminate{})
+ pgConn.flushWithPotentialWriteReadDeadlock()
+ }()
+}
+
+// CleanupDone returns a channel that will be closed after all underlying resources have been cleaned up. A closed
+// connection is no longer usable, but underlying resources, in particular the net.Conn, may not have finished closing
+// yet. This is because certain errors such as a context cancellation require that the interrupted function call return
+// immediately, but the error may also cause the connection to be closed. In these cases the underlying resources are
+// closed asynchronously.
+//
+// This is only likely to be useful to connection pools. It gives them a way to avoid establishing a new connection while
+// an old connection is still being cleaned up and thereby exceeding the maximum pool size.
+func (pgConn *PgConn) CleanupDone() chan (struct{}) {
+ return pgConn.cleanupDone
+}
+
+// IsClosed reports if the connection has been closed.
+//
+// CleanupDone() can be used to determine if all cleanup has been completed.
+func (pgConn *PgConn) IsClosed() bool {
+ return pgConn.status < connStatusIdle
+}
+
+// IsBusy reports if the connection is busy.
+func (pgConn *PgConn) IsBusy() bool {
+ return pgConn.status == connStatusBusy
+}
+
+// lock locks the connection.
+func (pgConn *PgConn) lock() error {
+ switch pgConn.status {
+ case connStatusBusy:
+ return &connLockError{status: "conn busy"} // This only should be possible in case of an application bug.
+ case connStatusClosed:
+ return &connLockError{status: "conn closed"}
+ case connStatusUninitialized:
+ return &connLockError{status: "conn uninitialized"}
+ }
+ pgConn.status = connStatusBusy
+ return nil
+}
+
+func (pgConn *PgConn) unlock() {
+ switch pgConn.status {
+ case connStatusBusy:
+ pgConn.status = connStatusIdle
+ case connStatusClosed:
+ default:
+ panic("BUG: cannot unlock unlocked connection") // This should only be possible if there is a bug in this package.
+ }
+}
+
+// ParameterStatus returns the value of a parameter reported by the server (e.g.
+// server_version). Returns an empty string for unknown parameters.
+func (pgConn *PgConn) ParameterStatus(key string) string {
+ return pgConn.parameterStatuses[key]
+}
+
+// CommandTag is the status text returned by PostgreSQL for a query.
+type CommandTag struct {
+ s string
+}
+
+// NewCommandTag makes a CommandTag from s.
+func NewCommandTag(s string) CommandTag {
+ return CommandTag{s: s}
+}
+
+// RowsAffected returns the number of rows affected. If the CommandTag was not
+// for a row affecting command (e.g. "CREATE TABLE") then it returns 0.
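+//
+// For example, the command tag "INSERT 0 5" yields 5.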
+func (ct CommandTag) RowsAffected() int64 {
+ // Find the start of the trailing run of digits.
+ idx := -1
+ for i := len(ct.s) - 1; i >= 0; i-- {
+ if ct.s[i] >= '0' && ct.s[i] <= '9' {
+ idx = i
+ } else {
+ break
+ }
+ }
+
+ if idx == -1 {
+ return 0
+ }
+
+ var n int64
+ for _, b := range ct.s[idx:] {
+ n = n*10 + int64(b-'0')
+ }
+
+ return n
+}
+
+func (ct CommandTag) String() string {
+ return ct.s
+}
+
+// Insert is true if the command tag starts with "INSERT".
+func (ct CommandTag) Insert() bool {
+ return strings.HasPrefix(ct.s, "INSERT")
+}
+
+// Update is true if the command tag starts with "UPDATE".
+func (ct CommandTag) Update() bool {
+ return strings.HasPrefix(ct.s, "UPDATE")
+}
+
+// Delete is true if the command tag starts with "DELETE".
+func (ct CommandTag) Delete() bool {
+ return strings.HasPrefix(ct.s, "DELETE")
+}
+
+// Select is true if the command tag starts with "SELECT".
+func (ct CommandTag) Select() bool {
+ return strings.HasPrefix(ct.s, "SELECT")
+}
+
+type FieldDescription struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+}
+
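+// convertRowDescription converts a pgproto3.RowDescription into a []FieldDescription, reusing dst when it has
+// sufficient capacity to avoid an allocation.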
+func (pgConn *PgConn) convertRowDescription(dst []FieldDescription, rd *pgproto3.RowDescription) []FieldDescription {
+ if cap(dst) >= len(rd.Fields) {
+ dst = dst[:len(rd.Fields):len(rd.Fields)]
+ } else {
+ dst = make([]FieldDescription, len(rd.Fields))
+ }
+
+ for i := range rd.Fields {
+ dst[i].Name = string(rd.Fields[i].Name)
+ dst[i].TableOID = rd.Fields[i].TableOID
+ dst[i].TableAttributeNumber = rd.Fields[i].TableAttributeNumber
+ dst[i].DataTypeOID = rd.Fields[i].DataTypeOID
+ dst[i].DataTypeSize = rd.Fields[i].DataTypeSize
+ dst[i].TypeModifier = rd.Fields[i].TypeModifier
+ dst[i].Format = rd.Fields[i].Format
+ }
+
+ return dst
+}
+
+type StatementDescription struct {
+ Name string
+ SQL string
+ ParamOIDs []uint32
+ Fields []FieldDescription
+}
+
+// Prepare creates a prepared statement. If the name is empty, the anonymous prepared statement will be used. This
+// allows Prepare to also describe statements without creating a server-side prepared statement.
+//
+// Prepare does not send a PREPARE statement to the server. It uses the PostgreSQL Parse and Describe protocol messages
+// directly.
+func (pgConn *PgConn) Prepare(ctx context.Context, name, sql string, paramOIDs []uint32) (*StatementDescription, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return nil, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ pgConn.frontend.SendParse(&pgproto3.Parse{Name: name, Query: sql, ParameterOIDs: paramOIDs})
+ pgConn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'S', Name: name})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return nil, err
+ }
+
+ psd := &StatementDescription{Name: name, SQL: sql}
+
+ var parseErr error
+
+readloop:
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return nil, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ psd.ParamOIDs = make([]uint32, len(msg.ParameterOIDs))
+ copy(psd.ParamOIDs, msg.ParameterOIDs)
+ case *pgproto3.RowDescription:
+ psd.Fields = pgConn.convertRowDescription(nil, msg)
+ case *pgproto3.ErrorResponse:
+ parseErr = ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ break readloop
+ }
+ }
+
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ return psd, nil
+}
+
+// Deallocate deallocates a prepared statement.
+//
+// Deallocate does not send a DEALLOCATE statement to the server. It uses the PostgreSQL Close protocol message
+// directly. This has slightly different behavior than executing a DEALLOCATE statement.
+// - Deallocate can succeed in an aborted transaction.
+// - Deallocating a non-existent prepared statement is not an error.
+func (pgConn *PgConn) Deallocate(ctx context.Context, name string) error {
+ if err := pgConn.lock(); err != nil {
+ return err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ pgConn.frontend.SendClose(&pgproto3.Close{ObjectType: 'S', Name: name})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return err
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ return ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ return nil
+ }
+ }
+}
+
+// ErrorResponseToPgError converts a wire protocol error message to a *PgError.
+func ErrorResponseToPgError(msg *pgproto3.ErrorResponse) *PgError {
+ return &PgError{
+ Severity: msg.Severity,
+ SeverityUnlocalized: msg.SeverityUnlocalized,
+ Code: string(msg.Code),
+ Message: string(msg.Message),
+ Detail: string(msg.Detail),
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: string(msg.InternalQuery),
+ Where: string(msg.Where),
+ SchemaName: string(msg.SchemaName),
+ TableName: string(msg.TableName),
+ ColumnName: string(msg.ColumnName),
+ DataTypeName: string(msg.DataTypeName),
+ ConstraintName: msg.ConstraintName,
+ File: string(msg.File),
+ Line: msg.Line,
+ Routine: string(msg.Routine),
+ }
+}
+
+func noticeResponseToNotice(msg *pgproto3.NoticeResponse) *Notice {
+ pgerr := ErrorResponseToPgError((*pgproto3.ErrorResponse)(msg))
+ return (*Notice)(pgerr)
+}
+
+// CancelRequest sends a cancel request to the PostgreSQL server. It returns an error if unable to deliver the cancel
+// request, but lack of an error does not ensure that the query was canceled. As specified in the documentation, there
+// is no way to be sure a query was canceled. See https://www.postgresql.org/docs/11/protocol-flow.html#id-1.10.5.7.9
+func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
+ // Open a cancellation request to the same server. The address is taken from the net.Conn directly instead of reusing
+ // the connection config. This is important in high availability configurations where fallback connections may be
+ // specified or DNS may be used to load balance.
+ serverAddr := pgConn.conn.RemoteAddr()
+ var serverNetwork string
+ var serverAddress string
+ if serverAddr.Network() == "unix" {
+ // for unix sockets, RemoteAddr() calls getpeername() which returns the name the
+ // server passed to bind(). For Postgres, this is always a relative path "./.s.PGSQL.5432"
+ // so connecting to it will fail. Fall back to the config's value
+ serverNetwork, serverAddress = NetworkAddress(pgConn.config.Host, pgConn.config.Port)
+ } else {
+ serverNetwork, serverAddress = serverAddr.Network(), serverAddr.String()
+ }
+ cancelConn, err := pgConn.config.DialFunc(ctx, serverNetwork, serverAddress)
+ if err != nil {
+ // In case of unix sockets, RemoteAddr() returns only the file part of the path. If the
+ // first connect failed, try the config.
+ if serverAddr.Network() != "unix" {
+ return err
+ }
+ serverNetwork, serverAddr := NetworkAddress(pgConn.config.Host, pgConn.config.Port)
+ cancelConn, err = pgConn.config.DialFunc(ctx, serverNetwork, serverAddr)
+ if err != nil {
+ return err
+ }
+ }
+ defer cancelConn.Close()
+
+ if ctx != context.Background() {
+ contextWatcher := ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: cancelConn})
+ contextWatcher.Watch(ctx)
+ defer contextWatcher.Unwatch()
+ }
+
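+ // Build the 16-byte CancelRequest message: the length (16), the request code 80877102 (1234 << 16 | 5678), then
+ // the backend PID and secret key of the connection to cancel.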
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint32(buf[0:4], 16)
+ binary.BigEndian.PutUint32(buf[4:8], 80877102)
+ binary.BigEndian.PutUint32(buf[8:12], pgConn.pid)
+ binary.BigEndian.PutUint32(buf[12:16], pgConn.secretKey)
+
+ if _, err := cancelConn.Write(buf); err != nil {
+ return fmt.Errorf("write to connection for cancellation: %w", err)
+ }
+
+ // Wait for the cancel request to be acknowledged by the server.
+ // It copies the behavior of libpq: https://github.com/postgres/postgres/blob/REL_16_0/src/interfaces/libpq/fe-connect.c#L4946-L4960
+ _, _ = cancelConn.Read(buf)
+
+ return nil
+}
+
+// WaitForNotification waits for a LISTEN/NOTIFY message to be received. It returns an error if a notification was not
+// received.
+func (pgConn *PgConn) WaitForNotification(ctx context.Context) error {
+ if err := pgConn.lock(); err != nil {
+ return err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return newContextAlreadyDoneError(ctx)
+ default:
+ }
+
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ return normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg.(type) {
+ case *pgproto3.NotificationResponse:
+ return nil
+ }
+ }
+}
+
+// Exec executes SQL via the PostgreSQL simple query protocol. SQL may contain multiple queries. Execution is
+// implicitly wrapped in a transaction unless a transaction is already in progress or SQL contains transaction control
+// statements.
+//
+// Prefer ExecParams unless executing arbitrary SQL that may contain multiple queries.
+func (pgConn *PgConn) Exec(ctx context.Context, sql string) *MultiResultReader {
+ if err := pgConn.lock(); err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: err,
+ }
+ }
+
+ pgConn.multiResultReader = MultiResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ multiResult := &pgConn.multiResultReader
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ multiResult.closed = true
+ multiResult.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return multiResult
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ pgConn.contextWatcher.Unwatch()
+ multiResult.closed = true
+ multiResult.err = err
+ pgConn.unlock()
+ return multiResult
+ }
+
+ return multiResult
+}
+
+// ExecParams executes a command via the PostgreSQL extended query protocol.
+//
+// sql is a SQL command string. It may only contain one query. Parameter substitution is positional using $1, $2, $3,
+// etc.
+//
+// paramValues are the parameter values. They must be encoded in the format given by paramFormats.
+//
+// paramOIDs is a slice of data type OIDs for paramValues. If paramOIDs is nil, the server will infer the data type for
+// all parameters. Any paramOID element that is 0 will cause the server to infer the data type for that parameter.
+// ExecParams will panic if len(paramOIDs) is not 0, 1, or len(paramValues).
+//
+// paramFormats is a slice of format codes determining for each paramValue column whether it is encoded in text or
+// binary format. If paramFormats is nil all params are text format. ExecParams will panic if
+// len(paramFormats) is not 0, 1, or len(paramValues).
+//
+// resultFormats is a slice of format codes determining for each result column whether it is encoded in text or
+// binary format. If resultFormats is nil all results will be in text format.
+//
+// ResultReader must be closed before PgConn can be used again.
+func (pgConn *PgConn) ExecParams(ctx context.Context, sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) *ResultReader {
+ result := pgConn.execExtendedPrefix(ctx, paramValues)
+ if result.closed {
+ return result
+ }
+
+ pgConn.frontend.SendParse(&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs})
+ pgConn.frontend.SendBind(&pgproto3.Bind{ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+
+ pgConn.execExtendedSuffix(result)
+
+ return result
+}
+
+// ExecPrepared enqueues the execution of a prepared statement via the PostgreSQL extended query protocol.
+//
+// paramValues are the parameter values. They must be encoded in the format given by paramFormats.
+//
+// paramFormats is a slice of format codes determining for each paramValue column whether it is encoded in text or
+// binary format. If paramFormats is nil all params are text format. ExecPrepared will panic if
+// len(paramFormats) is not 0, 1, or len(paramValues).
+//
+// resultFormats is a slice of format codes determining for each result column whether it is encoded in text or
+// binary format. If resultFormats is nil all results will be in text format.
+//
+// ResultReader must be closed before PgConn can be used again.
+func (pgConn *PgConn) ExecPrepared(ctx context.Context, stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) *ResultReader {
+ result := pgConn.execExtendedPrefix(ctx, paramValues)
+ if result.closed {
+ return result
+ }
+
+ pgConn.frontend.SendBind(&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+
+ pgConn.execExtendedSuffix(result)
+
+ return result
+}
+
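+// execExtendedPrefix locks the connection and validates the parameters and context before an extended protocol
+// command is queued.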
+func (pgConn *PgConn) execExtendedPrefix(ctx context.Context, paramValues [][]byte) *ResultReader {
+ pgConn.resultReader = ResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ result := &pgConn.resultReader
+
+ if err := pgConn.lock(); err != nil {
+ result.concludeCommand(CommandTag{}, err)
+ result.closed = true
+ return result
+ }
+
+ if len(paramValues) > math.MaxUint16 {
+ result.concludeCommand(CommandTag{}, fmt.Errorf("extended protocol limited to %v parameters", math.MaxUint16))
+ result.closed = true
+ pgConn.unlock()
+ return result
+ }
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ result.concludeCommand(CommandTag{}, newContextAlreadyDoneError(ctx))
+ result.closed = true
+ pgConn.unlock()
+ return result
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ return result
+}
+
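+// execExtendedSuffix sends the Describe/Execute/Sync trailer shared by ExecParams and ExecPrepared and flushes it to
+// the server.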
+func (pgConn *PgConn) execExtendedSuffix(result *ResultReader) {
+ pgConn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ pgConn.frontend.SendExecute(&pgproto3.Execute{})
+ pgConn.frontend.SendSync(&pgproto3.Sync{})
+
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ result.concludeCommand(CommandTag{}, err)
+ pgConn.contextWatcher.Unwatch()
+ result.closed = true
+ pgConn.unlock()
+ return
+ }
+
+ result.readUntilRowDescription()
+}
+
+// CopyTo executes the copy command sql and copies the results to w.
+func (pgConn *PgConn) CopyTo(ctx context.Context, w io.Writer, sql string) (CommandTag, error) {
+ if err := pgConn.lock(); err != nil {
+ return CommandTag{}, err
+ }
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ pgConn.unlock()
+ return CommandTag{}, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Send copy to command
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ pgConn.unlock()
+ return CommandTag{}, err
+ }
+
+ // Read results
+ var commandTag CommandTag
+ var pgErr error
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CopyDone:
+ case *pgproto3.CopyData:
+ _, err := w.Write(msg.Data)
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+ case *pgproto3.ReadyForQuery:
+ pgConn.unlock()
+ return commandTag, pgErr
+ case *pgproto3.CommandComplete:
+ commandTag = pgConn.makeCommandTag(msg.CommandTag)
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ }
+ }
+}
+
+// CopyFrom executes the copy command sql and copies all of r to the PostgreSQL server.
+//
+// Note: context cancellation will only interrupt operations on the underlying PostgreSQL network connection. Reads on r
+// could still block.
+func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (CommandTag, error) {
+ if err := pgConn.lock(); err != nil {
+ return CommandTag{}, err
+ }
+ defer pgConn.unlock()
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ return CommandTag{}, newContextAlreadyDoneError(ctx)
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ defer pgConn.contextWatcher.Unwatch()
+ }
+
+ // Send copy from query
+ pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
+ err := pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+
+ // Send copy data
+ abortCopyChan := make(chan struct{})
+ copyErrChan := make(chan error, 1)
+ signalMessageChan := pgConn.signalMessage()
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ buf := iobufpool.Get(65536)
+ defer iobufpool.Put(buf)
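+ // Reserve a 5-byte header: 'd' (the CopyData message type) followed by a 4-byte big-endian length that is filled
+ // in before each send. The length counts itself plus the payload, hence n+4 below.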
+ (*buf)[0] = 'd'
+
+ for {
+ n, readErr := r.Read((*buf)[5:cap(*buf)])
+ if n > 0 {
+ *buf = (*buf)[0 : n+5]
+ pgio.SetInt32((*buf)[1:], int32(n+4))
+
+ writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(*buf)
+ if writeErr != nil {
+ // Write errors are always fatal, but we can't use asyncClose because we are in a different goroutine. Not
+ // setting pgConn.status or closing pgConn.cleanupDone for the same reason.
+ pgConn.conn.Close()
+
+ copyErrChan <- writeErr
+ return
+ }
+ }
+ if readErr != nil {
+ copyErrChan <- readErr
+ return
+ }
+
+ select {
+ case <-abortCopyChan:
+ return
+ default:
+ }
+ }
+ }()
+
+ var pgErr error
+ var copyErr error
+ for copyErr == nil && pgErr == nil {
+ select {
+ case copyErr = <-copyErrChan:
+ case <-signalMessageChan:
+ // If pgConn.receiveMessage encounters an error it will call pgConn.asyncClose. But that is a race condition with
+ // the goroutine. So instead check pgConn.bufferingReceiveErr, which will have been set by the signalMessage goroutine. If an
+ // error is found then forcibly close the connection without sending the Terminate message.
+ if err := pgConn.bufferingReceiveErr; err != nil {
+ pgConn.status = connStatusClosed
+ pgConn.conn.Close()
+ close(pgConn.cleanupDone)
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+ msg, _ := pgConn.receiveMessage()
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ default:
+ signalMessageChan = pgConn.signalMessage()
+ }
+ }
+ }
+ close(abortCopyChan)
+ // Make sure io goroutine finishes before writing.
+ wg.Wait()
+
+ if copyErr == io.EOF || pgErr != nil {
+ pgConn.frontend.Send(&pgproto3.CopyDone{})
+ } else {
+ pgConn.frontend.Send(&pgproto3.CopyFail{Message: copyErr.Error()})
+ }
+ err = pgConn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, err
+ }
+
+ // Read results
+ var commandTag CommandTag
+ for {
+ msg, err := pgConn.receiveMessage()
+ if err != nil {
+ pgConn.asyncClose()
+ return CommandTag{}, normalizeTimeoutError(ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ return commandTag, pgErr
+ case *pgproto3.CommandComplete:
+ commandTag = pgConn.makeCommandTag(msg.CommandTag)
+ case *pgproto3.ErrorResponse:
+ pgErr = ErrorResponseToPgError(msg)
+ }
+ }
+}
+
+// MultiResultReader is a reader for commands that can return multiple results, such as those sent by Exec or ExecBatch.
+type MultiResultReader struct {
+ pgConn *PgConn
+ ctx context.Context
+ pipeline *Pipeline
+
+ rr *ResultReader
+
+ closed bool
+ err error
+}
+
+// ReadAll reads all available results. Calling ReadAll is mutually exclusive with all other MultiResultReader methods.
+func (mrr *MultiResultReader) ReadAll() ([]*Result, error) {
+ var results []*Result
+
+ for mrr.NextResult() {
+ results = append(results, mrr.ResultReader().Read())
+ }
+ err := mrr.Close()
+
+ return results, err
+}
+
+func (mrr *MultiResultReader) receiveMessage() (pgproto3.BackendMessage, error) {
+ msg, err := mrr.pgConn.receiveMessage()
+ if err != nil {
+ mrr.pgConn.contextWatcher.Unwatch()
+ mrr.err = normalizeTimeoutError(mrr.ctx, err)
+ mrr.closed = true
+ mrr.pgConn.asyncClose()
+ return nil, mrr.err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ mrr.closed = true
+ if mrr.pipeline != nil {
+ mrr.pipeline.expectedReadyForQueryCount--
+ } else {
+ mrr.pgConn.contextWatcher.Unwatch()
+ mrr.pgConn.unlock()
+ }
+ case *pgproto3.ErrorResponse:
+ mrr.err = ErrorResponseToPgError(msg)
+ }
+
+ return msg, nil
+}
+
+// NextResult advances the MultiResultReader to the next result and returns true if a result is available.
+func (mrr *MultiResultReader) NextResult() bool {
+ for !mrr.closed && mrr.err == nil {
+ msg, err := mrr.receiveMessage()
+ if err != nil {
+ return false
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ mrr.pgConn.resultReader = ResultReader{
+ pgConn: mrr.pgConn,
+ multiResultReader: mrr,
+ ctx: mrr.ctx,
+ fieldDescriptions: mrr.pgConn.convertRowDescription(mrr.pgConn.fieldDescriptions[:], msg),
+ }
+
+ mrr.rr = &mrr.pgConn.resultReader
+ return true
+ case *pgproto3.CommandComplete:
+ mrr.pgConn.resultReader = ResultReader{
+ commandTag: mrr.pgConn.makeCommandTag(msg.CommandTag),
+ commandConcluded: true,
+ closed: true,
+ }
+ mrr.rr = &mrr.pgConn.resultReader
+ return true
+ case *pgproto3.EmptyQueryResponse:
+ return false
+ }
+ }
+
+ return false
+}
+
+// ResultReader returns the current ResultReader.
+func (mrr *MultiResultReader) ResultReader() *ResultReader {
+ return mrr.rr
+}
+
+// Close closes the MultiResultReader and returns the first error that occurred during the MultiResultReader's use.
+func (mrr *MultiResultReader) Close() error {
+ for !mrr.closed {
+ _, err := mrr.receiveMessage()
+ if err != nil {
+ return mrr.err
+ }
+ }
+
+ return mrr.err
+}
+
+// ResultReader is a reader for the result of a single query.
+type ResultReader struct {
+ pgConn *PgConn
+ multiResultReader *MultiResultReader
+ pipeline *Pipeline
+ ctx context.Context
+
+ fieldDescriptions []FieldDescription
+ rowValues [][]byte
+ commandTag CommandTag
+ commandConcluded bool
+ closed bool
+ err error
+}
+
+// Result is the saved query response that is returned by calling Read on a ResultReader.
+type Result struct {
+ FieldDescriptions []FieldDescription
+ Rows [][][]byte
+ CommandTag CommandTag
+ Err error
+}
+
+// Read saves the query response to a Result.
+func (rr *ResultReader) Read() *Result {
+ br := &Result{}
+
+ for rr.NextRow() {
+ if br.FieldDescriptions == nil {
+ br.FieldDescriptions = make([]FieldDescription, len(rr.FieldDescriptions()))
+ copy(br.FieldDescriptions, rr.FieldDescriptions())
+ }
+
+ values := rr.Values()
+ row := make([][]byte, len(values))
+ for i := range row {
+ if values[i] != nil {
+ row[i] = make([]byte, len(values[i]))
+ copy(row[i], values[i])
+ }
+ }
+ br.Rows = append(br.Rows, row)
+ }
+
+ br.CommandTag, br.Err = rr.Close()
+
+ return br
+}
+
+// NextRow advances the ResultReader to the next row and returns true if a row is available.
+func (rr *ResultReader) NextRow() bool {
+ for !rr.commandConcluded {
+ msg, err := rr.receiveMessage()
+ if err != nil {
+ return false
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.DataRow:
+ rr.rowValues = msg.Values
+ return true
+ }
+ }
+
+ return false
+}
+
+// FieldDescriptions returns the field descriptions for the current result set. The returned slice is only valid until
+// the ResultReader is closed. It may return nil (for example, if the query did not return a result set or an error was
+// encountered).
+func (rr *ResultReader) FieldDescriptions() []FieldDescription {
+ return rr.fieldDescriptions
+}
+
+// Values returns the current row data. NextRow must previously have been called. The returned [][]byte is only
+// valid until the next NextRow call or the ResultReader is closed.
+func (rr *ResultReader) Values() [][]byte {
+ return rr.rowValues
+}
+
+// Close consumes any remaining result data and returns the command tag or
+// error.
+func (rr *ResultReader) Close() (CommandTag, error) {
+ if rr.closed {
+ return rr.commandTag, rr.err
+ }
+ rr.closed = true
+
+ for !rr.commandConcluded {
+ _, err := rr.receiveMessage()
+ if err != nil {
+ return CommandTag{}, rr.err
+ }
+ }
+
+ if rr.multiResultReader == nil && rr.pipeline == nil {
+ for {
+ msg, err := rr.receiveMessage()
+ if err != nil {
+ return CommandTag{}, rr.err
+ }
+
+ switch msg := msg.(type) {
+ // Detect a deferred constraint violation where the ErrorResponse is sent after CommandComplete.
+ case *pgproto3.ErrorResponse:
+ rr.err = ErrorResponseToPgError(msg)
+ case *pgproto3.ReadyForQuery:
+ rr.pgConn.contextWatcher.Unwatch()
+ rr.pgConn.unlock()
+ return rr.commandTag, rr.err
+ }
+ }
+ }
+
+ return rr.commandTag, rr.err
+}
+
+// readUntilRowDescription ensures the ResultReader's fieldDescriptions are loaded. It does not return an error as any
+// error will be stored in the ResultReader.
+func (rr *ResultReader) readUntilRowDescription() {
+ for !rr.commandConcluded {
+ // Peek before receive to avoid consuming a DataRow if the result set does not include a RowDescription message.
+ // This should never happen under normal pgconn usage, but it is possible if SendBytes and ReceiveResults are
+ // manually used to construct a query that does not issue a describe statement.
+ msg, _ := rr.pgConn.peekMessage()
+ if _, ok := msg.(*pgproto3.DataRow); ok {
+ return
+ }
+
+ // Consume the message
+ msg, _ = rr.receiveMessage()
+ if _, ok := msg.(*pgproto3.RowDescription); ok {
+ return
+ }
+ }
+}
+
+func (rr *ResultReader) receiveMessage() (msg pgproto3.BackendMessage, err error) {
+ if rr.multiResultReader == nil {
+ msg, err = rr.pgConn.receiveMessage()
+ } else {
+ msg, err = rr.multiResultReader.receiveMessage()
+ }
+
+ if err != nil {
+ err = normalizeTimeoutError(rr.ctx, err)
+ rr.concludeCommand(CommandTag{}, err)
+ rr.pgConn.contextWatcher.Unwatch()
+ rr.closed = true
+ if rr.multiResultReader == nil {
+ rr.pgConn.asyncClose()
+ }
+
+ return nil, rr.err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rr.fieldDescriptions = rr.pgConn.convertRowDescription(rr.pgConn.fieldDescriptions[:], msg)
+ case *pgproto3.CommandComplete:
+ rr.concludeCommand(rr.pgConn.makeCommandTag(msg.CommandTag), nil)
+ case *pgproto3.EmptyQueryResponse:
+ rr.concludeCommand(CommandTag{}, nil)
+ case *pgproto3.ErrorResponse:
+ rr.concludeCommand(CommandTag{}, ErrorResponseToPgError(msg))
+ }
+
+ return msg, nil
+}
+
+func (rr *ResultReader) concludeCommand(commandTag CommandTag, err error) {
+ // Keep the first error that is recorded. Store the error before checking if the command is already concluded to
+ // allow for receiving an error after CommandComplete but before ReadyForQuery.
+ if err != nil && rr.err == nil {
+ rr.err = err
+ }
+
+ if rr.commandConcluded {
+ return
+ }
+
+ rr.commandTag = commandTag
+ rr.rowValues = nil
+ rr.commandConcluded = true
+}
+
+// Batch is a collection of queries that can be sent to the PostgreSQL server in a single round trip.
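+//
+// A minimal usage sketch (assuming an established *PgConn named conn and a context ctx):
+//
+//	batch := &Batch{}
+//	batch.ExecParams("insert into t (a) values ($1)", [][]byte{[]byte("1")}, nil, nil, nil)
+//	results, err := conn.ExecBatch(ctx, batch).ReadAll()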
+type Batch struct {
+ buf []byte
+ err error
+}
+
+// ExecParams appends an ExecParams command to the batch. See PgConn.ExecParams for parameter descriptions.
+func (batch *Batch) ExecParams(sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) {
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+ batch.ExecPrepared("", paramValues, paramFormats, resultFormats)
+}
+
+// ExecPrepared appends an ExecPrepared command to the batch. See PgConn.ExecPrepared for parameter descriptions.
+func (batch *Batch) ExecPrepared(stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) {
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Describe{ObjectType: 'P'}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+
+ batch.buf, batch.err = (&pgproto3.Execute{}).Encode(batch.buf)
+ if batch.err != nil {
+ return
+ }
+}
+
+// ExecBatch executes all the queries in batch in a single round trip. Execution is implicitly transactional unless a
+// transaction is already in progress or SQL contains transaction control statements. This is a simpler way of executing
+// multiple queries in a single round trip than using pipeline mode.
+func (pgConn *PgConn) ExecBatch(ctx context.Context, batch *Batch) *MultiResultReader {
+ if batch.err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: batch.err,
+ }
+ }
+
+ if err := pgConn.lock(); err != nil {
+ return &MultiResultReader{
+ closed: true,
+ err: err,
+ }
+ }
+
+ pgConn.multiResultReader = MultiResultReader{
+ pgConn: pgConn,
+ ctx: ctx,
+ }
+ multiResult := &pgConn.multiResultReader
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ multiResult.closed = true
+ multiResult.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return multiResult
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ batch.buf, batch.err = (&pgproto3.Sync{}).Encode(batch.buf)
+ if batch.err != nil {
+ multiResult.closed = true
+ multiResult.err = batch.err
+ pgConn.unlock()
+ return multiResult
+ }
+
+ pgConn.enterPotentialWriteReadDeadlock()
+ defer pgConn.exitPotentialWriteReadDeadlock()
+ _, err := pgConn.conn.Write(batch.buf)
+ if err != nil {
+ multiResult.closed = true
+ multiResult.err = err
+ pgConn.unlock()
+ return multiResult
+ }
+
+ return multiResult
+}
+
+// EscapeString escapes a string such that it can safely be interpolated into a SQL command string. It does not include
+// the surrounding single quotes.
+//
+// The current implementation requires that standard_conforming_strings=on and client_encoding="UTF8". If these
+// conditions are not met an error will be returned. It is possible these restrictions will be lifted in the future.
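+//
+// For example, EscapeString("O'Reilly") returns "O''Reilly".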
+func (pgConn *PgConn) EscapeString(s string) (string, error) {
+ if pgConn.ParameterStatus("standard_conforming_strings") != "on" {
+ return "", errors.New("EscapeString must be run with standard_conforming_strings=on")
+ }
+
+ if pgConn.ParameterStatus("client_encoding") != "UTF8" {
+ return "", errors.New("EscapeString must be run with client_encoding=UTF8")
+ }
+
+ return strings.Replace(s, "'", "''", -1), nil
+}
+
+// CheckConn checks the underlying connection without writing any bytes. This is currently implemented by doing a read
+// with a very short deadline. This can be useful because a TCP connection can be broken such that a write will appear
+// to succeed even though it will never actually reach the server. Reading immediately before a write will detect this
+// condition. If this is done immediately before sending a query it reduces the chances a query will be sent that fails
+// without the client knowing whether the server received it or not.
+//
+// Deprecated: CheckConn is deprecated in favor of Ping. CheckConn cannot detect all types of broken connections where
+// the write would still appear to succeed. Prefer Ping unless on a high latency connection.
+func (pgConn *PgConn) CheckConn() error {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
+ defer cancel()
+
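+ // A timeout on this read is the expected healthy result: it means no unsolicited data or error was waiting on the
+ // connection.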
+ _, err := pgConn.ReceiveMessage(ctx)
+ if err != nil {
+ if !Timeout(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Ping pings the server. This can be useful because a TCP connection can be broken such that a write will appear to
+// succeed even though it will never actually reach the server. Pinging immediately before sending a query reduces the
+// chances a query will be sent that fails without the client knowing whether the server received it or not.
+func (pgConn *PgConn) Ping(ctx context.Context) error {
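+ // "-- ping" is a comment-only statement, so the server performs a full round trip (replying with
+ // EmptyQueryResponse and ReadyForQuery) without executing anything.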
+ return pgConn.Exec(ctx, "-- ping").Close()
+}
+
+// makeCommandTag makes a CommandTag. It does not retain a reference to buf or buf's underlying memory.
+func (pgConn *PgConn) makeCommandTag(buf []byte) CommandTag {
+ return CommandTag{s: string(buf)}
+}
+
+// enterPotentialWriteReadDeadlock must be called before a write that could deadlock if the server is simultaneously
+// blocked writing to us.
+func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
+ // The time to wait is somewhat arbitrary. A Write should only take as long as the syscall and memcpy to the OS
+ // outbound network buffer unless the buffer is full (which potentially is a block). It needs to be long enough for
+ // the normal case, but short enough not to kill performance if a block occurs.
+ //
+ // In addition, on Windows the default timer resolution is 15.6ms. So setting the timer to less than that is
+ // ineffective.
+ if pgConn.slowWriteTimer.Reset(15 * time.Millisecond) {
+ panic("BUG: slow write timer already active")
+ }
+}
+
+// exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
+func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
+ if !pgConn.slowWriteTimer.Stop() {
+ // The timer starts its function in a separate goroutine. It is necessary to ensure the background reader has
+ // started before calling Stop. Otherwise, the background reader may not be stopped. That on its own is not a
+ // serious problem. But what is a serious problem is that the background reader may start at an inopportune time in
+ // a subsequent query. For example, if a subsequent query was canceled then a deadline may be set on the net.Conn to
+ // interrupt an in-progress read. After the read is interrupted, but before the deadline is cleared, the background
+ // reader could start and read a deadline error. Then the next query would receive an unexpected deadline error.
+ <-pgConn.bgReaderStarted
+ pgConn.bgReader.Stop()
+ }
+}
+
+func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
+ pgConn.enterPotentialWriteReadDeadlock()
+ defer pgConn.exitPotentialWriteReadDeadlock()
+ err := pgConn.frontend.Flush()
+ return err
+}
+
+// SyncConn prepares the underlying net.Conn for direct use. PgConn may internally buffer reads or use goroutines for
+// background IO. This means that any direct use of the underlying net.Conn may be corrupted if a read is already
+// buffered or a read is in progress. SyncConn drains read buffers and stops background IO. In some cases this may
+// require sending a ping to the server. ctx can be used to cancel this operation. This should be called before any
+// operation that will use the underlying net.Conn directly, e.g. before Conn() or Hijack().
+//
+// This should not be confused with the PostgreSQL protocol Sync message.
+func (pgConn *PgConn) SyncConn(ctx context.Context) error {
+ for i := 0; i < 10; i++ {
+ if pgConn.bgReader.Status() == bgreader.StatusStopped && pgConn.frontend.ReadBufferLen() == 0 {
+ return nil
+ }
+
+ err := pgConn.Ping(ctx)
+ if err != nil {
+ return fmt.Errorf("SyncConn: Ping failed while syncing conn: %w", err)
+ }
+ }
+
+ // This should never happen. The only way I can imagine this occurring is if the server is constantly sending data,
+ // such as LISTEN/NOTIFY or log notifications, such that we can never get an empty buffer.
+ return errors.New("SyncConn: conn never synchronized")
+}
+
+// CustomData returns a map that can be used to associate custom data with the connection.
+func (pgConn *PgConn) CustomData() map[string]any {
+ return pgConn.customData
+}
+
+// HijackedConn is the result of hijacking a connection.
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by semantic versioning
+// compatibility guarantees.
+type HijackedConn struct {
+ Conn net.Conn
+ PID uint32 // backend pid
+ SecretKey uint32 // key to use to send a cancel query message to the server
+ ParameterStatuses map[string]string // parameters that have been reported by the server
+ TxStatus byte
+ Frontend *pgproto3.Frontend
+ Config *Config
+ CustomData map[string]any
+}
+
+// Hijack extracts the internal connection data. pgConn must be in an idle state. SyncConn should be called immediately
+// before Hijack. pgConn is unusable after hijacking. Hijacking is typically only useful when using pgconn to establish
+// a connection, but taking complete control of the raw connection after that (e.g. a load balancer or proxy).
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by semantic versioning
+// compatibility guarantees.
+func (pgConn *PgConn) Hijack() (*HijackedConn, error) {
+ if err := pgConn.lock(); err != nil {
+ return nil, err
+ }
+ pgConn.status = connStatusClosed
+
+ return &HijackedConn{
+ Conn: pgConn.conn,
+ PID: pgConn.pid,
+ SecretKey: pgConn.secretKey,
+ ParameterStatuses: pgConn.parameterStatuses,
+ TxStatus: pgConn.txStatus,
+ Frontend: pgConn.frontend,
+ Config: pgConn.config,
+ CustomData: pgConn.customData,
+ }, nil
+}
+
+// Construct creates a PgConn from an already established connection to a PostgreSQL server. This is the inverse of
+// PgConn.Hijack. The connection must be in an idle state.
+//
+// hc.Frontend is replaced by a new pgproto3.Frontend built by hc.Config.BuildFrontend.
+//
+// Due to the necessary exposure of internal implementation details, it is not covered by semantic versioning
+// compatibility guarantees.
+func Construct(hc *HijackedConn) (*PgConn, error) {
+ pgConn := &PgConn{
+ conn: hc.Conn,
+ pid: hc.PID,
+ secretKey: hc.SecretKey,
+ parameterStatuses: hc.ParameterStatuses,
+ txStatus: hc.TxStatus,
+ frontend: hc.Frontend,
+ config: hc.Config,
+ customData: hc.CustomData,
+
+ status: connStatusIdle,
+
+ cleanupDone: make(chan struct{}),
+ }
+
+ pgConn.contextWatcher = ctxwatch.NewContextWatcher(hc.Config.BuildContextWatcherHandler(pgConn))
+ pgConn.bgReader = bgreader.New(pgConn.conn)
+ pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
+ func() {
+ pgConn.bgReader.Start()
+ pgConn.bgReaderStarted <- struct{}{}
+ },
+ )
+ pgConn.slowWriteTimer.Stop()
+ pgConn.bgReaderStarted = make(chan struct{})
+ pgConn.frontend = hc.Config.BuildFrontend(pgConn.bgReader, pgConn.conn)
+
+ return pgConn, nil
+}
+
+// Pipeline represents a connection in pipeline mode.
+//
+// SendPrepare, SendQueryParams, and SendQueryPrepared queue requests to the server. These requests are not written until
+// the pipeline is flushed by Flush or Sync. Sync must be called after the last request is queued. Requests between
+// synchronization points are implicitly transactional unless explicit transaction control statements have been issued.
+//
+// The context the pipeline was started with is in effect for the entire life of the Pipeline.
+//
+// For a deeper understanding of pipeline mode see the PostgreSQL documentation for the extended query protocol
+// (https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY) and the libpq pipeline mode
+// (https://www.postgresql.org/docs/current/libpq-pipeline-mode.html).
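+//
+// A minimal usage sketch (assuming an established *PgConn named conn and a context ctx):
+//
+//	pipeline := conn.StartPipeline(ctx)
+//	pipeline.SendQueryParams("select 1", nil, nil, nil, nil)
+//	err := pipeline.Sync()
+//	// call GetResults until it returns a *PipelineSync, then:
+//	err = pipeline.Close()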
+type Pipeline struct {
+ conn *PgConn
+ ctx context.Context
+
+ expectedReadyForQueryCount int
+ pendingSync bool
+
+ err error
+ closed bool
+}
+
+// PipelineSync is returned by GetResults when a ReadyForQuery message is received.
+type PipelineSync struct{}
+
+// CloseComplete is returned by GetResults when a CloseComplete message is received.
+type CloseComplete struct{}
+
+// StartPipeline switches the connection to pipeline mode and returns a *Pipeline. In pipeline mode requests can be sent
+// to the server without waiting for a response. Close must be called on the returned *Pipeline to return the connection
+// to normal mode. While in pipeline mode, no methods that communicate with the server may be called except
+// CancelRequest and Close. ctx is in effect for the entire life of the *Pipeline.
+//
+// Prefer ExecBatch when only sending one group of queries at once.
+func (pgConn *PgConn) StartPipeline(ctx context.Context) *Pipeline {
+ if err := pgConn.lock(); err != nil {
+ return &Pipeline{
+ closed: true,
+ err: err,
+ }
+ }
+
+ pgConn.pipeline = Pipeline{
+ conn: pgConn,
+ ctx: ctx,
+ }
+ pipeline := &pgConn.pipeline
+
+ if ctx != context.Background() {
+ select {
+ case <-ctx.Done():
+ pipeline.closed = true
+ pipeline.err = newContextAlreadyDoneError(ctx)
+ pgConn.unlock()
+ return pipeline
+ default:
+ }
+ pgConn.contextWatcher.Watch(ctx)
+ }
+
+ return pipeline
+}
+
+// SendPrepare is the pipeline version of *PgConn.Prepare.
+func (p *Pipeline) SendPrepare(name, sql string, paramOIDs []uint32) {
+ if p.closed {
+ return
+ }
+ p.pendingSync = true
+
+ p.conn.frontend.SendParse(&pgproto3.Parse{Name: name, Query: sql, ParameterOIDs: paramOIDs})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'S', Name: name})
+}
+
+// SendDeallocate deallocates a prepared statement.
+func (p *Pipeline) SendDeallocate(name string) {
+ if p.closed {
+ return
+ }
+ p.pendingSync = true
+
+ p.conn.frontend.SendClose(&pgproto3.Close{ObjectType: 'S', Name: name})
+}
+
+// SendQueryParams is the pipeline version of *PgConn.ExecParams.
+func (p *Pipeline) SendQueryParams(sql string, paramValues [][]byte, paramOIDs []uint32, paramFormats []int16, resultFormats []int16) {
+ if p.closed {
+ return
+ }
+ p.pendingSync = true
+
+ p.conn.frontend.SendParse(&pgproto3.Parse{Query: sql, ParameterOIDs: paramOIDs})
+ p.conn.frontend.SendBind(&pgproto3.Bind{ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ p.conn.frontend.SendExecute(&pgproto3.Execute{})
+}
+
+// SendQueryPrepared is the pipeline version of *PgConn.ExecPrepared.
+func (p *Pipeline) SendQueryPrepared(stmtName string, paramValues [][]byte, paramFormats []int16, resultFormats []int16) {
+ if p.closed {
+ return
+ }
+ p.pendingSync = true
+
+ p.conn.frontend.SendBind(&pgproto3.Bind{PreparedStatement: stmtName, ParameterFormatCodes: paramFormats, Parameters: paramValues, ResultFormatCodes: resultFormats})
+ p.conn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
+ p.conn.frontend.SendExecute(&pgproto3.Execute{})
+}
+
+// Flush flushes the queued requests without establishing a synchronization point.
+func (p *Pipeline) Flush() error {
+ if p.closed {
+ if p.err != nil {
+ return p.err
+ }
+ return errors.New("pipeline closed")
+ }
+
+ err := p.conn.flushWithPotentialWriteReadDeadlock()
+ if err != nil {
+ err = normalizeTimeoutError(p.ctx, err)
+
+ p.conn.asyncClose()
+
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+ p.closed = true
+ p.err = err
+ return err
+ }
+
+ return nil
+}
+
+// Sync establishes a synchronization point and flushes the queued requests.
+func (p *Pipeline) Sync() error {
+ if p.closed {
+ if p.err != nil {
+ return p.err
+ }
+ return errors.New("pipeline closed")
+ }
+
+ p.conn.frontend.SendSync(&pgproto3.Sync{})
+ err := p.Flush()
+ if err != nil {
+ return err
+ }
+
+ p.pendingSync = false
+ p.expectedReadyForQueryCount++
+
+ return nil
+}
+
+// GetResults gets the next results. If results are present, results may be a *ResultReader, *StatementDescription,
+// *CloseComplete, or *PipelineSync. If an ErrorResponse is received from the server, results will be nil and err will
+// be a *PgError. If no results are available, results and err will both be nil.
+func (p *Pipeline) GetResults() (results any, err error) {
+ if p.closed {
+ if p.err != nil {
+ return nil, p.err
+ }
+ return nil, errors.New("pipeline closed")
+ }
+
+ if p.expectedReadyForQueryCount == 0 {
+ return nil, nil
+ }
+
+ return p.getResults()
+}
+
+func (p *Pipeline) getResults() (results any, err error) {
+ for {
+ msg, err := p.conn.receiveMessage()
+ if err != nil {
+ p.closed = true
+ p.err = err
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ p.conn.resultReader = ResultReader{
+ pgConn: p.conn,
+ pipeline: p,
+ ctx: p.ctx,
+ fieldDescriptions: p.conn.convertRowDescription(p.conn.fieldDescriptions[:], msg),
+ }
+ return &p.conn.resultReader, nil
+ case *pgproto3.CommandComplete:
+ p.conn.resultReader = ResultReader{
+ commandTag: p.conn.makeCommandTag(msg.CommandTag),
+ commandConcluded: true,
+ closed: true,
+ }
+ return &p.conn.resultReader, nil
+ case *pgproto3.ParseComplete:
+ peekedMsg, err := p.conn.peekMessage()
+ if err != nil {
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+ if _, ok := peekedMsg.(*pgproto3.ParameterDescription); ok {
+ return p.getResultsPrepare()
+ }
+ case *pgproto3.CloseComplete:
+ return &CloseComplete{}, nil
+ case *pgproto3.ReadyForQuery:
+ p.expectedReadyForQueryCount--
+ return &PipelineSync{}, nil
+ case *pgproto3.ErrorResponse:
+ pgErr := ErrorResponseToPgError(msg)
+ return nil, pgErr
+ }
+
+ }
+}
+
+func (p *Pipeline) getResultsPrepare() (*StatementDescription, error) {
+ psd := &StatementDescription{}
+
+ for {
+ msg, err := p.conn.receiveMessage()
+ if err != nil {
+ p.conn.asyncClose()
+ return nil, normalizeTimeoutError(p.ctx, err)
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ psd.ParamOIDs = make([]uint32, len(msg.ParameterOIDs))
+ copy(psd.ParamOIDs, msg.ParameterOIDs)
+ case *pgproto3.RowDescription:
+ psd.Fields = p.conn.convertRowDescription(nil, msg)
+ return psd, nil
+
+		// NoData is returned instead of RowDescription when there is no expected result, e.g. an INSERT without a
+		// RETURNING clause.
+ case *pgproto3.NoData:
+ return psd, nil
+
+ // These should never happen here. But don't take chances that could lead to a deadlock.
+ case *pgproto3.ErrorResponse:
+ pgErr := ErrorResponseToPgError(msg)
+ return nil, pgErr
+ case *pgproto3.CommandComplete:
+ p.conn.asyncClose()
+ return nil, errors.New("BUG: received CommandComplete while handling Describe")
+ case *pgproto3.ReadyForQuery:
+ p.conn.asyncClose()
+ return nil, errors.New("BUG: received ReadyForQuery while handling Describe")
+ }
+ }
+}
+
+// Close closes the pipeline and returns the connection to normal mode.
+func (p *Pipeline) Close() error {
+ if p.closed {
+ return p.err
+ }
+
+ p.closed = true
+
+ if p.pendingSync {
+ p.conn.asyncClose()
+ p.err = errors.New("pipeline has unsynced requests")
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+
+ return p.err
+ }
+
+ for p.expectedReadyForQueryCount > 0 {
+ _, err := p.getResults()
+ if err != nil {
+ p.err = err
+ var pgErr *PgError
+ if !errors.As(err, &pgErr) {
+ p.conn.asyncClose()
+ break
+ }
+ }
+ }
+
+ p.conn.contextWatcher.Unwatch()
+ p.conn.unlock()
+
+ return p.err
+}
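+
+// A minimal usage sketch of pipeline mode (editor's illustration, not part of the vendored source; assumes a
+// connected *PgConn named conn and a context named ctx):
+//
+//	pipeline := conn.StartPipeline(ctx)
+//	pipeline.SendQueryParams(`select 42`, nil, nil, nil, nil)
+//	pipeline.SendQueryParams(`select 'hello'`, nil, nil, nil, nil)
+//	if err := pipeline.Sync(); err != nil {
+//		// handle error
+//	}
+//	for {
+//		results, err := pipeline.GetResults()
+//		if err != nil || results == nil {
+//			break // error, or all results up to the sync point have been read
+//		}
+//		switch r := results.(type) {
+//		case *ResultReader:
+//			_ = r.Read() // consume the rows and command tag
+//		case *PipelineSync:
+//			// the server finished processing everything up to the last Sync
+//		}
+//	}
+//	if err := pipeline.Close(); err != nil {
+//		// handle error
+//	}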
+
+// DeadlineContextWatcherHandler handles canceled contexts by setting a deadline on a net.Conn.
+type DeadlineContextWatcherHandler struct {
+ Conn net.Conn
+
+	// DeadlineDelay is added to time.Now() to compute the deadline set on the net.Conn when the context is canceled.
+ DeadlineDelay time.Duration
+}
+
+func (h *DeadlineContextWatcherHandler) HandleCancel(ctx context.Context) {
+ h.Conn.SetDeadline(time.Now().Add(h.DeadlineDelay))
+}
+
+func (h *DeadlineContextWatcherHandler) HandleUnwatchAfterCancel() {
+ h.Conn.SetDeadline(time.Time{})
+}
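+
+// A wiring sketch (editor's illustration; assumes a *Config named config obtained from ParseConfig and that the
+// config exposes a BuildContextWatcherHandler hook):
+//
+//	config.BuildContextWatcherHandler = func(pgConn *PgConn) ctxwatch.Handler {
+//		return &DeadlineContextWatcherHandler{Conn: pgConn.Conn(), DeadlineDelay: time.Second}
+//	}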
+
+// CancelRequestContextWatcherHandler handles canceled contexts by sending a cancel request to the server. It also sets
+// a deadline on a net.Conn as a fallback.
+type CancelRequestContextWatcherHandler struct {
+ Conn *PgConn
+
+ // CancelRequestDelay is the delay before sending the cancel request to the server.
+ CancelRequestDelay time.Duration
+
+	// DeadlineDelay is added to time.Now() to compute the deadline set on the net.Conn when the context is canceled.
+ DeadlineDelay time.Duration
+
+ cancelFinishedChan chan struct{}
+ handleUnwatchAfterCancelCalled func()
+}
+
+func (h *CancelRequestContextWatcherHandler) HandleCancel(context.Context) {
+ h.cancelFinishedChan = make(chan struct{})
+ var handleUnwatchedAfterCancelCalledCtx context.Context
+ handleUnwatchedAfterCancelCalledCtx, h.handleUnwatchAfterCancelCalled = context.WithCancel(context.Background())
+
+ deadline := time.Now().Add(h.DeadlineDelay)
+ h.Conn.conn.SetDeadline(deadline)
+
+ go func() {
+ defer close(h.cancelFinishedChan)
+
+ select {
+ case <-handleUnwatchedAfterCancelCalledCtx.Done():
+ return
+ case <-time.After(h.CancelRequestDelay):
+ }
+
+ cancelRequestCtx, cancel := context.WithDeadline(handleUnwatchedAfterCancelCalledCtx, deadline)
+ defer cancel()
+ h.Conn.CancelRequest(cancelRequestCtx)
+
+ // CancelRequest is inherently racy. Even though the cancel request has been received by the server at this point,
+ // it hasn't necessarily been delivered to the other connection. If we immediately return and the connection is
+ // immediately used then it is possible the CancelRequest will actually cancel our next query. The
+ // TestCancelRequestContextWatcherHandler Stress test can produce this error without the sleep below. The sleep time
+ // is arbitrary, but should be sufficient to prevent this error case.
+ time.Sleep(100 * time.Millisecond)
+ }()
+}
+
+func (h *CancelRequestContextWatcherHandler) HandleUnwatchAfterCancel() {
+ h.handleUnwatchAfterCancelCalled()
+ <-h.cancelFinishedChan
+
+ h.Conn.conn.SetDeadline(time.Time{})
+}
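+
+// The corresponding sketch for the cancel-request variant (editor's illustration; the delay values are arbitrary and
+// the BuildContextWatcherHandler hook is an assumption, as above):
+//
+//	config.BuildContextWatcherHandler = func(pgConn *PgConn) ctxwatch.Handler {
+//		return &CancelRequestContextWatcherHandler{
+//			Conn:               pgConn,
+//			CancelRequestDelay: time.Second,
+//			DeadlineDelay:      5 * time.Second,
+//		}
+//	}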
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/README.md b/vendor/github.com/jackc/pgx/v5/pgproto3/README.md
new file mode 100644
index 0000000..7a26f1c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/README.md
@@ -0,0 +1,7 @@
+# pgproto3
+
+Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
+
+pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.
+
+See example/pgfortune for a playful example of a fake PostgreSQL server.
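+
+A minimal server-side sketch (illustrative only; error handling omitted, and `ln` is an assumed `net.Listener`):
+
+```go
+conn, _ := ln.Accept()
+backend := pgproto3.NewBackend(conn, conn)
+
+// The first message is special: it has no leading type byte.
+startup, _ := backend.ReceiveStartupMessage()
+_ = startup // StartupMessage, SSLRequest, GSSEncRequest, or CancelRequest
+
+backend.Send(&pgproto3.AuthenticationOk{})
+backend.Send(&pgproto3.ReadyForQuery{TxStatus: 'I'})
+_ = backend.Flush()
+```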
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go
new file mode 100644
index 0000000..ac2962e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_cleartext_password.go
@@ -0,0 +1,51 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationCleartextPassword is a message sent from the backend indicating that a clear-text password is required.
+type AuthenticationCleartextPassword struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationCleartextPassword) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationCleartextPassword) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationCleartextPassword) Decode(src []byte) error {
+ if len(src) != 4 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeCleartextPassword {
+ return errors.New("bad auth type")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationCleartextPassword) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeCleartextPassword)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationCleartextPassword) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "AuthenticationCleartextPassword",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go
new file mode 100644
index 0000000..178ef31
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationGSS is a message sent from the backend indicating that GSSAPI authentication is required.
+type AuthenticationGSS struct{}
+
+func (a *AuthenticationGSS) Backend() {}
+
+func (a *AuthenticationGSS) AuthenticationResponse() {}
+
+func (a *AuthenticationGSS) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeGSS {
+ return errors.New("bad auth type")
+ }
+ return nil
+}
+
+func (a *AuthenticationGSS) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeGSS)
+ return finishMessage(dst, sp)
+}
+
+func (a *AuthenticationGSS) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "AuthenticationGSS",
+ })
+}
+
+func (a *AuthenticationGSS) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go
new file mode 100644
index 0000000..2ba3f3b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_gss_continue.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationGSSContinue is a message sent from the backend containing GSSAPI or SSPI data needed to continue
+// authentication.
+type AuthenticationGSSContinue struct {
+ Data []byte
+}
+
+func (a *AuthenticationGSSContinue) Backend() {}
+
+func (a *AuthenticationGSSContinue) AuthenticationResponse() {}
+
+func (a *AuthenticationGSSContinue) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeGSSCont {
+ return errors.New("bad auth type")
+ }
+
+ a.Data = src[4:]
+ return nil
+}
+
+func (a *AuthenticationGSSContinue) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeGSSCont)
+ dst = append(dst, a.Data...)
+ return finishMessage(dst, sp)
+}
+
+func (a *AuthenticationGSSContinue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "AuthenticationGSSContinue",
+ Data: a.Data,
+ })
+}
+
+func (a *AuthenticationGSSContinue) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Data []byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ a.Data = msg.Data
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go
new file mode 100644
index 0000000..854c640
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_md5_password.go
@@ -0,0 +1,76 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationMD5Password is a message sent from the backend indicating that an MD5 hashed password is required.
+type AuthenticationMD5Password struct {
+ Salt [4]byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationMD5Password) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationMD5Password) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationMD5Password) Decode(src []byte) error {
+ if len(src) != 8 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeMD5Password {
+ return errors.New("bad auth type")
+ }
+
+ copy(dst.Salt[:], src[4:8])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationMD5Password) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeMD5Password)
+ dst = append(dst, src.Salt[:]...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationMD5Password) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Salt [4]byte
+ }{
+ Type: "AuthenticationMD5Password",
+ Salt: src.Salt,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationMD5Password) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Salt [4]byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Salt = msg.Salt
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go
new file mode 100644
index 0000000..ec11d39
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_ok.go
@@ -0,0 +1,51 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationOk is a message sent from the backend indicating that authentication was successful.
+type AuthenticationOk struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationOk) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationOk) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationOk) Decode(src []byte) error {
+ if len(src) != 4 {
+ return errors.New("bad authentication message size")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeOk {
+ return errors.New("bad auth type")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationOk) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeOk)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationOk) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "AuthenticationOK",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go
new file mode 100644
index 0000000..e66580f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl.go
@@ -0,0 +1,72 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASL is a message sent from the backend indicating that SASL authentication is required.
+type AuthenticationSASL struct {
+ AuthMechanisms []string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASL) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASL) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASL) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASL {
+ return errors.New("bad auth type")
+ }
+
+ authMechanisms := src[4:]
+ for len(authMechanisms) > 1 {
+ idx := bytes.IndexByte(authMechanisms, 0)
+ if idx == -1 {
+ return &invalidMessageFormatErr{messageType: "AuthenticationSASL", details: "unterminated string"}
+ }
+ dst.AuthMechanisms = append(dst.AuthMechanisms, string(authMechanisms[:idx]))
+ authMechanisms = authMechanisms[idx+1:]
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASL) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASL)
+
+ for _, s := range src.AuthMechanisms {
+ dst = append(dst, []byte(s)...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASL) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ AuthMechanisms []string
+ }{
+ Type: "AuthenticationSASL",
+ AuthMechanisms: src.AuthMechanisms,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go
new file mode 100644
index 0000000..70fba4a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_continue.go
@@ -0,0 +1,75 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASLContinue is a message sent from the backend containing a SASL challenge.
+type AuthenticationSASLContinue struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASLContinue) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASLContinue) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASLContinue) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASLContinue {
+ return errors.New("bad auth type")
+ }
+
+ dst.Data = src[4:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASLContinue) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASLContinue)
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASLContinue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "AuthenticationSASLContinue",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationSASLContinue) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go
new file mode 100644
index 0000000..84976c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/authentication_sasl_final.go
@@ -0,0 +1,75 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// AuthenticationSASLFinal is a message sent from the backend indicating a SASL authentication has completed.
+type AuthenticationSASLFinal struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*AuthenticationSASLFinal) Backend() {}
+
+// AuthenticationResponse identifies this message as an authentication response.
+func (*AuthenticationSASLFinal) AuthenticationResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *AuthenticationSASLFinal) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("authentication message too short")
+ }
+
+ authType := binary.BigEndian.Uint32(src)
+
+ if authType != AuthTypeSASLFinal {
+ return errors.New("bad auth type")
+ }
+
+ dst.Data = src[4:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *AuthenticationSASLFinal) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'R')
+ dst = pgio.AppendUint32(dst, AuthTypeSASLFinal)
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src AuthenticationSASLFinal) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "AuthenticationSASLFinal",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *AuthenticationSASLFinal) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go b/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go
new file mode 100644
index 0000000..d146c33
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/backend.go
@@ -0,0 +1,292 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Backend acts as a server for the PostgreSQL wire protocol version 3.
+type Backend struct {
+ cr *chunkReader
+ w io.Writer
+
+ // tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
+ // before it is actually transmitted (i.e. before Flush).
+ tracer *tracer
+
+ wbuf []byte
+ encodeError error
+
+ // Frontend message flyweights
+ bind Bind
+ cancelRequest CancelRequest
+ _close Close
+ copyFail CopyFail
+ copyData CopyData
+ copyDone CopyDone
+ describe Describe
+ execute Execute
+ flush Flush
+ functionCall FunctionCall
+ gssEncRequest GSSEncRequest
+ parse Parse
+ query Query
+ sslRequest SSLRequest
+ startupMessage StartupMessage
+ sync Sync
+ terminate Terminate
+
+ bodyLen int
+ maxBodyLen int // maxBodyLen is the maximum length of a message body in octets. If a message body exceeds this length, Receive will return an error.
+ msgType byte
+ partialMsg bool
+ authType uint32
+}
+
+const (
+ minStartupPacketLen = 4 // minStartupPacketLen is a single 32-bit int version or code.
+ maxStartupPacketLen = 10000 // maxStartupPacketLen is MAX_STARTUP_PACKET_LENGTH from PG source.
+)
+
+// NewBackend creates a new Backend.
+func NewBackend(r io.Reader, w io.Writer) *Backend {
+ cr := newChunkReader(r, 0)
+ return &Backend{cr: cr, w: w}
+}
+
+// Send sends a message to the frontend (i.e. the client). The message is buffered until Flush is called. Any error
+// encountered will be returned from Flush.
+func (b *Backend) Send(msg BackendMessage) {
+ if b.encodeError != nil {
+ return
+ }
+
+ prevLen := len(b.wbuf)
+ newBuf, err := msg.Encode(b.wbuf)
+ if err != nil {
+ b.encodeError = err
+ return
+ }
+ b.wbuf = newBuf
+
+ if b.tracer != nil {
+ b.tracer.traceMessage('B', int32(len(b.wbuf)-prevLen), msg)
+ }
+}
+
+// Flush writes any pending messages to the frontend (i.e. the client).
+func (b *Backend) Flush() error {
+ if err := b.encodeError; err != nil {
+ b.encodeError = nil
+ b.wbuf = b.wbuf[:0]
+ return &writeError{err: err, safeToRetry: true}
+ }
+
+ n, err := b.w.Write(b.wbuf)
+
+ const maxLen = 1024
+ if len(b.wbuf) > maxLen {
+ b.wbuf = make([]byte, 0, maxLen)
+ } else {
+ b.wbuf = b.wbuf[:0]
+ }
+
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ return nil
+}
+
+// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
+// PQtrace.
+func (b *Backend) Trace(w io.Writer, options TracerOptions) {
+ b.tracer = &tracer{
+ w: w,
+ buf: &bytes.Buffer{},
+ TracerOptions: options,
+ }
+}
+
+// Untrace stops tracing.
+func (b *Backend) Untrace() {
+ b.tracer = nil
+}
+
+// ReceiveStartupMessage receives the initial connection message. This method is used instead of the normal Receive
+// method because the initial connection message is "special" and does not include the message type as the first byte.
+// This will return either a StartupMessage, SSLRequest, GSSEncRequest, or CancelRequest.
+func (b *Backend) ReceiveStartupMessage() (FrontendMessage, error) {
+ buf, err := b.cr.Next(4)
+ if err != nil {
+ return nil, err
+ }
+ msgSize := int(binary.BigEndian.Uint32(buf) - 4)
+
+ if msgSize < minStartupPacketLen || msgSize > maxStartupPacketLen {
+ return nil, fmt.Errorf("invalid length of startup packet: %d", msgSize)
+ }
+
+ buf, err = b.cr.Next(msgSize)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ code := binary.BigEndian.Uint32(buf)
+
+ switch code {
+ case ProtocolVersionNumber:
+ err = b.startupMessage.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.startupMessage, nil
+ case sslRequestNumber:
+ err = b.sslRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.sslRequest, nil
+ case cancelRequestCode:
+ err = b.cancelRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.cancelRequest, nil
+ case gssEncReqNumber:
+ err = b.gssEncRequest.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+ return &b.gssEncRequest, nil
+ default:
+ return nil, fmt.Errorf("unknown startup message code: %d", code)
+ }
+}
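+
+// A typical dispatch over the returned startup message (editor's sketch; TLS/GSS negotiation details omitted and
+// backend is an assumed *Backend):
+//
+//	msg, err := backend.ReceiveStartupMessage()
+//	if err != nil {
+//		// handle error
+//	}
+//	switch msg.(type) {
+//	case *StartupMessage:
+//		// proceed with authentication
+//	case *SSLRequest:
+//		// reply 'S' or 'N'; on 'S', upgrade the connection to TLS and start over
+//	case *GSSEncRequest:
+//		// reply 'G' or 'N' analogously
+//	case *CancelRequest:
+//		// cancel the referenced backend, then close the connection
+//	}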
+
+// Receive receives a message from the frontend. The returned message is only valid until the next call to Receive.
+func (b *Backend) Receive() (FrontendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ b.msgType = header[0]
+ b.bodyLen = int(binary.BigEndian.Uint32(header[1:])) - 4
+ if b.maxBodyLen > 0 && b.bodyLen > b.maxBodyLen {
+ return nil, &ExceededMaxBodyLenErr{b.maxBodyLen, b.bodyLen}
+ }
+ b.partialMsg = true
+ }
+
+ var msg FrontendMessage
+ switch b.msgType {
+ case 'B':
+ msg = &b.bind
+ case 'C':
+ msg = &b._close
+ case 'D':
+ msg = &b.describe
+ case 'E':
+ msg = &b.execute
+ case 'F':
+ msg = &b.functionCall
+ case 'f':
+ msg = &b.copyFail
+ case 'd':
+ msg = &b.copyData
+ case 'c':
+ msg = &b.copyDone
+ case 'H':
+ msg = &b.flush
+ case 'P':
+ msg = &b.parse
+ case 'p':
+ switch b.authType {
+ case AuthTypeSASL:
+ msg = &SASLInitialResponse{}
+ case AuthTypeSASLContinue:
+ msg = &SASLResponse{}
+ case AuthTypeSASLFinal:
+ msg = &SASLResponse{}
+ case AuthTypeGSS, AuthTypeGSSCont:
+ msg = &GSSResponse{}
+ case AuthTypeCleartextPassword, AuthTypeMD5Password:
+ fallthrough
+ default:
+ // to maintain backwards compatibility
+ msg = &PasswordMessage{}
+ }
+ case 'Q':
+ msg = &b.query
+ case 'S':
+ msg = &b.sync
+ case 'X':
+ msg = &b.terminate
+ default:
+ return nil, fmt.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ if err != nil {
+ return nil, err
+ }
+
+ if b.tracer != nil {
+ b.tracer.traceMessage('F', int32(5+len(msgBody)), msg)
+ }
+
+ return msg, nil
+}
+
+// SetAuthType sets the authentication type in the backend.
+// Since multiple message types can start with 'p', SetAuthType allows
+// contextual identification of FrontendMessages. For example, in the
+// PG message flow documentation for PasswordMessage:
+//
+// Byte1('p')
+//
+// Identifies the message as a password response. Note that this is also used for
+// GSSAPI, SSPI and SASL response messages. The exact message type can be deduced from
+// the context.
+//
+// Since the Backend does not track which authentication request it has sent, it is important to call SetAuthType()
+// after sending an authentication request so that subsequent 'p' messages from the frontend are decoded correctly.
+func (b *Backend) SetAuthType(authType uint32) error {
+ switch authType {
+ case AuthTypeOk,
+ AuthTypeCleartextPassword,
+ AuthTypeMD5Password,
+ AuthTypeSCMCreds,
+ AuthTypeGSS,
+ AuthTypeGSSCont,
+ AuthTypeSSPI,
+ AuthTypeSASL,
+ AuthTypeSASLContinue,
+ AuthTypeSASLFinal:
+ b.authType = authType
+ default:
+ return fmt.Errorf("authType not recognized: %d", authType)
+ }
+
+ return nil
+}
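+
+// For example, a server issuing a SASL challenge (editor's sketch; assumes a *Backend named backend):
+//
+//	backend.Send(&AuthenticationSASL{AuthMechanisms: []string{"SCRAM-SHA-256"}})
+//	_ = backend.Flush()
+//	_ = backend.SetAuthType(AuthTypeSASL)
+//	msg, _ := backend.Receive() // the next 'p' message is decoded as *SASLInitialResponse
+//	_ = msg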
+
+// SetMaxBodyLen sets the maximum length of a message body in octets. If a message body exceeds this length, Receive
+// will return an error. This is useful for protecting against malicious clients that send large messages with the
+// intent of causing memory exhaustion. The default is 0, which means no maximum is enforced.
+func (b *Backend) SetMaxBodyLen(maxBodyLen int) {
+ b.maxBodyLen = maxBodyLen
+}
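+
+// For example, backend.SetMaxBodyLen(1 << 20) rejects any message whose body exceeds 1 MiB (editor's sketch).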
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go
new file mode 100644
index 0000000..23f5da6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/backend_key_data.go
@@ -0,0 +1,50 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BackendKeyData struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*BackendKeyData) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *BackendKeyData) Decode(src []byte) error {
+ if len(src) != 8 {
+ return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[:4])
+ dst.SecretKey = binary.BigEndian.Uint32(src[4:])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *BackendKeyData) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'K')
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src BackendKeyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "BackendKeyData",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go b/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go
new file mode 100644
index 0000000..f7bdb97
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/big_endian.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+)
+
+type BigEndianBuf [8]byte
+
+func (b BigEndianBuf) Int16(n int16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, uint16(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint16(n uint16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int32(n int32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, uint32(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint32(n uint32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int64(n int64) []byte {
+ buf := b[0:8]
+ binary.BigEndian.PutUint64(buf, uint64(n))
+ return buf
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go b/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go
new file mode 100644
index 0000000..ad6ac48
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/bind.go
@@ -0,0 +1,223 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Bind struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters [][]byte
+ ResultFormatCodes []int16
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Bind) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Bind) Decode(src []byte) error {
+ *dst = Bind{}
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.DestinationPortal = string(src[:idx])
+ rp := idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.PreparedStatement = string(src[rp : rp+idx])
+ rp += idx + 1
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterFormatCodeCount > 0 {
+ dst.ParameterFormatCodes = make([]int16, parameterFormatCodeCount)
+
+ if len(src[rp:]) < len(dst.ParameterFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < parameterFormatCodeCount; i++ {
+ dst.ParameterFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterCount > 0 {
+ dst.Parameters = make([][]byte, parameterCount)
+
+ for i := 0; i < parameterCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+			// A length of -1 indicates a NULL parameter value.
+ if msgSize == -1 {
+ continue
+ }
+
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ dst.Parameters[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ resultFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.ResultFormatCodes = make([]int16, resultFormatCodeCount)
+ if len(src[rp:]) < len(dst.ResultFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < resultFormatCodeCount; i++ {
+ dst.ResultFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Bind) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'B')
+
+ dst = append(dst, src.DestinationPortal...)
+ dst = append(dst, 0)
+ dst = append(dst, src.PreparedStatement...)
+ dst = append(dst, 0)
+
+ if len(src.ParameterFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many parameter format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterFormatCodes)))
+ for _, fc := range src.ParameterFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ if len(src.Parameters) > math.MaxUint16 {
+ return nil, errors.New("too many parameters")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Parameters)))
+ for _, p := range src.Parameters {
+ if p == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(p)))
+ dst = append(dst, p...)
+ }
+
+ if len(src.ResultFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many result format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ResultFormatCodes)))
+ for _, fc := range src.ResultFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Bind) MarshalJSON() ([]byte, error) {
+ formattedParameters := make([]map[string]string, len(src.Parameters))
+ for i, p := range src.Parameters {
+ if p == nil {
+ continue
+ }
+
+ textFormat := true
+ if len(src.ParameterFormatCodes) == 1 {
+ textFormat = src.ParameterFormatCodes[0] == 0
+ } else if len(src.ParameterFormatCodes) > 1 {
+ textFormat = src.ParameterFormatCodes[i] == 0
+ }
+
+ if textFormat {
+ formattedParameters[i] = map[string]string{"text": string(p)}
+ } else {
+ formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(p)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }{
+ Type: "Bind",
+ DestinationPortal: src.DestinationPortal,
+ PreparedStatement: src.PreparedStatement,
+ ParameterFormatCodes: src.ParameterFormatCodes,
+ Parameters: formattedParameters,
+ ResultFormatCodes: src.ResultFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Bind) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }
+ err := json.Unmarshal(data, &msg)
+ if err != nil {
+ return err
+ }
+ dst.DestinationPortal = msg.DestinationPortal
+ dst.PreparedStatement = msg.PreparedStatement
+ dst.ParameterFormatCodes = msg.ParameterFormatCodes
+ dst.Parameters = make([][]byte, len(msg.Parameters))
+ dst.ResultFormatCodes = msg.ResultFormatCodes
+ for n, parameter := range msg.Parameters {
+ dst.Parameters[n], err = getValueFromJSON(parameter)
+ if err != nil {
+ return fmt.Errorf("cannot get param %d: %w", n, err)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go
new file mode 100644
index 0000000..bacf30d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/bind_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type BindComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*BindComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *BindComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "BindComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *BindComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '2', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src BindComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "BindComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go
new file mode 100644
index 0000000..6b52dd9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/cancel_request.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const cancelRequestCode = 80877102
+
+type CancelRequest struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CancelRequest) Frontend() {}
+
+func (dst *CancelRequest) Decode(src []byte) error {
+ if len(src) != 12 {
+ return errors.New("bad cancel request size")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != cancelRequestCode {
+ return errors.New("bad cancel request code")
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[4:])
+ dst.SecretKey = binary.BigEndian.Uint32(src[8:])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *CancelRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 16)
+ dst = pgio.AppendInt32(dst, cancelRequestCode)
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CancelRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "CancelRequest",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go b/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go
new file mode 100644
index 0000000..fc0fa61
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/chunkreader.go
@@ -0,0 +1,90 @@
+package pgproto3
+
+import (
+ "io"
+
+ "github.com/jackc/pgx/v5/internal/iobufpool"
+)
+
+// chunkReader is an io.Reader wrapper that minimizes IO reads and memory allocations. It allocates memory in chunks and
+// will read as much as will fit in the current buffer in a single call regardless of how large a read is actually
+// requested. The memory returned via Next is only valid until the next call to Next.
+//
+// This is roughly equivalent to a bufio.Reader that only uses Peek and Discard to never copy bytes.
+type chunkReader struct {
+ r io.Reader
+
+ buf *[]byte
+ rp, wp int // buf read position and write position
+
+ minBufSize int
+}
+
+// newChunkReader creates and returns a new chunkReader for r. If minBufSize is <= 0 a default buffer size is used.
+func newChunkReader(r io.Reader, minBufSize int) *chunkReader {
+ if minBufSize <= 0 {
+		// For historical reasons, Postgres currently has an 8KB send buffer internally, so we want a buffer at least
+		// that large here.
+		// @see https://github.com/postgres/postgres/blob/249d64999615802752940e017ee5166e726bc7cd/src/backend/libpq/pqcomm.c#L134
+		// @see https://www.postgresql.org/message-id/0cdc5485-cb3c-5e16-4a46-e3b2f7a41322%40ya.ru
+		//
+		// In addition, testing has found no benefit from any larger buffer.
+ minBufSize = 8192
+ }
+
+ return &chunkReader{
+ r: r,
+ minBufSize: minBufSize,
+ buf: iobufpool.Get(minBufSize),
+ }
+}
+
+// Next returns buf filled with the next n bytes. buf is only valid until the next call to Next. If an error occurs,
+// buf will be nil.
+func (r *chunkReader) Next(n int) (buf []byte, err error) {
+ // Reset the buffer if it is empty
+ if r.rp == r.wp {
+ if len(*r.buf) != r.minBufSize {
+ iobufpool.Put(r.buf)
+ r.buf = iobufpool.Get(r.minBufSize)
+ }
+ r.rp = 0
+ r.wp = 0
+ }
+
+ // n bytes already in buf
+ if (r.wp - r.rp) >= n {
+ buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
+ r.rp += n
+ return buf, err
+ }
+
+ // buf is smaller than requested number of bytes
+ if len(*r.buf) < n {
+ bigBuf := iobufpool.Get(n)
+ r.wp = copy((*bigBuf), (*r.buf)[r.rp:r.wp])
+ r.rp = 0
+ iobufpool.Put(r.buf)
+ r.buf = bigBuf
+ }
+
+	// buf is large enough, but the filled area must be shifted to the start to make enough contiguous space
+ minReadCount := n - (r.wp - r.rp)
+ if (len(*r.buf) - r.wp) < minReadCount {
+ r.wp = copy((*r.buf), (*r.buf)[r.rp:r.wp])
+ r.rp = 0
+ }
+
+ // Read at least the required number of bytes from the underlying io.Reader
+ readBytesCount, err := io.ReadAtLeast(r.r, (*r.buf)[r.wp:], minReadCount)
+ r.wp += readBytesCount
+ if err != nil {
+ return nil, err
+ }
+
+ buf = (*r.buf)[r.rp : r.rp+n : r.rp+n]
+ r.rp += n
+ return buf, nil
+}
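+
+// Illustrative consequence of the shared buffer (editor's sketch): the slice returned by one call may be invalidated
+// by the next, so callers must finish with (or copy) each result before calling Next again.
+//
+//	header, _ := cr.Next(5) // valid now
+//	body, _ := cr.Next(n)   // header may now reference reused memory
+//	_ = body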
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/close.go b/vendor/github.com/jackc/pgx/v5/pgproto3/close.go
new file mode 100644
index 0000000..0b50f27
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/close.go
@@ -0,0 +1,81 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+)
+
+type Close struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Close) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Close) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Close) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'C')
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Close) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Close",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Close) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ ObjectType string
+ Name string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.ObjectType) != 1 {
+ return errors.New("invalid length for Close.ObjectType")
+ }
+
+ dst.ObjectType = byte(msg.ObjectType[0])
+ dst.Name = msg.Name
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go
new file mode 100644
index 0000000..833f7a1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/close_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CloseComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CloseComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CloseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CloseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CloseComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '3', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CloseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CloseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go
new file mode 100644
index 0000000..eba7094
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/command_complete.go
@@ -0,0 +1,66 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type CommandComplete struct {
+ CommandTag []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CommandComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CommandComplete) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx == -1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete", details: "unterminated string"}
+ }
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete", details: "string terminated too early"}
+ }
+
+ dst.CommandTag = src[:idx]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CommandComplete) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'C')
+ dst = append(dst, src.CommandTag...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CommandComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ CommandTag string
+ }{
+ Type: "CommandComplete",
+ CommandTag: string(src.CommandTag),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CommandComplete) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ CommandTag string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.CommandTag = []byte(msg.CommandTag)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go
new file mode 100644
index 0000000..99e1afe
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_both_response.go
@@ -0,0 +1,95 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyBothResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyBothResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyBothResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyBothResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyBothResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'W')
+ dst = append(dst, src.OverallFormat)
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyBothResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyBothResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyBothResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyBothResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go
new file mode 100644
index 0000000..89ecdd4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_data.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+)
+
+type CopyData struct {
+ Data []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyData) Backend() {}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyData) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyData) Decode(src []byte) error {
+ dst.Data = src
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyData) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'd')
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "CopyData",
+ Data: hex.EncodeToString(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyData) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Data = []byte(msg.Data)
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go
new file mode 100644
index 0000000..040814d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_done.go
@@ -0,0 +1,38 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CopyDone struct {
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyDone) Backend() {}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyDone) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyDone) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CopyDone", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyDone) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'c', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyDone) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CopyDone",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go
new file mode 100644
index 0000000..72a85fd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_fail.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type CopyFail struct {
+ Message string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*CopyFail) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyFail) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CopyFail"}
+ }
+
+ dst.Message = string(src[:idx])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyFail) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'f')
+ dst = append(dst, src.Message...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyFail) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Message string
+ }{
+ Type: "CopyFail",
+ Message: src.Message,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go
new file mode 100644
index 0000000..06cf99c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_in_response.go
@@ -0,0 +1,96 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyInResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*CopyInResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyInResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyInResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyInResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'G')
+
+ dst = append(dst, src.OverallFormat)
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyInResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyInResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyInResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyInResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
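
Reviewer sketch, based on the protocol documentation rather than this file: OverallFormat 0 announces a textual COPY, in which case every column format code is 0; OverallFormat 1 announces binary, and each column code may then be 0 (text) or 1 (binary). The values below are illustrative.

	// A textual COPY of two columns as the server would announce it.
	resp := pgproto3.CopyInResponse{OverallFormat: 0, ColumnFormatCodes: []uint16{0, 0}}
	buf, _ := resp.Encode(nil) // 'G', int32 len = 11, format 0, 2 columns, codes 0, 0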
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go
new file mode 100644
index 0000000..549e916
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/copy_out_response.go
@@ -0,0 +1,96 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CopyOutResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyOutResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *CopyOutResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyOutResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *CopyOutResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'H')
+
+ dst = append(dst, src.OverallFormat)
+
+ if len(src.ColumnFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many column format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src CopyOutResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyOutResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *CopyOutResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ OverallFormat string
+ ColumnFormatCodes []uint16
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ if len(msg.OverallFormat) != 1 {
+ return errors.New("invalid length for CopyOutResponse.OverallFormat")
+ }
+
+ dst.OverallFormat = msg.OverallFormat[0]
+ dst.ColumnFormatCodes = msg.ColumnFormatCodes
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go b/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go
new file mode 100644
index 0000000..fdfb0f7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/data_row.go
@@ -0,0 +1,143 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type DataRow struct {
+ Values [][]byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*DataRow) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *DataRow) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+ rp := 0
+ fieldCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+	// If the capacity of the values slice is too small OR substantially too
+	// large, reallocate. This is to avoid a single row with many columns
+	// permanently allocating memory.
+ if cap(dst.Values) < fieldCount || cap(dst.Values)-fieldCount > 32 {
+ newCap := 32
+ if newCap < fieldCount {
+ newCap = fieldCount
+ }
+ dst.Values = make([][]byte, fieldCount, newCap)
+ } else {
+ dst.Values = dst.Values[:fieldCount]
+ }
+
+ for i := 0; i < fieldCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if valueLen == -1 {
+ dst.Values[i] = nil
+ } else {
+ if len(src[rp:]) < valueLen || valueLen < 0 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ dst.Values[i] = src[rp : rp+valueLen : rp+valueLen]
+ rp += valueLen
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *DataRow) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'D')
+
+ if len(src.Values) > math.MaxUint16 {
+ return nil, errors.New("too many values")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Values)))
+ for _, v := range src.Values {
+ if v == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(v)))
+ dst = append(dst, v...)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src DataRow) MarshalJSON() ([]byte, error) {
+ formattedValues := make([]map[string]string, len(src.Values))
+ for i, v := range src.Values {
+ if v == nil {
+ continue
+ }
+
+ var hasNonPrintable bool
+ for _, b := range v {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValues[i] = map[string]string{"binary": hex.EncodeToString(v)}
+ } else {
+ formattedValues[i] = map[string]string{"text": string(v)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Values []map[string]string
+ }{
+ Type: "DataRow",
+ Values: formattedValues,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *DataRow) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Values []map[string]string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Values = make([][]byte, len(msg.Values))
+ for n, parameter := range msg.Values {
+ var err error
+ dst.Values[n], err = getValueFromJSON(parameter)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
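
One consequence of the slicing in Decode above: Values aliases the connection's read buffer, and the DataRow flyweight is reused across Receive calls, so a caller that keeps a value must copy it first. A minimal sketch, with illustrative variable names:

	// Copy row values that must outlive the next Receive call.
	if row, ok := msg.(*pgproto3.DataRow); ok {
		saved := make([][]byte, len(row.Values))
		for i, v := range row.Values {
			if v != nil {
				saved[i] = append([]byte(nil), v...) // deep copy out of the read buffer
			}
		}
		rows = append(rows, saved)
	}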
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go b/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go
new file mode 100644
index 0000000..89feff2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/describe.go
@@ -0,0 +1,80 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+)
+
+type Describe struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Describe) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Describe) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Describe) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'D')
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Describe) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Describe",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *Describe) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ ObjectType string
+ Name string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if len(msg.ObjectType) != 1 {
+ return errors.New("invalid length for Describe.ObjectType")
+ }
+
+ dst.ObjectType = byte(msg.ObjectType[0])
+ dst.Name = msg.Name
+ return nil
+}
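
A small sketch of the encoding (the statement name is hypothetical): describing a prepared statement uses ObjectType 'S', a portal uses 'P'.

	buf, _ := (&pgproto3.Describe{ObjectType: 'S', Name: "stmt_1"}).Encode(nil)
	// buf = ['D', 0x00, 0x00, 0x00, 0x0c, 'S', 's', 't', 'm', 't', '_', '1', 0x00]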
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go b/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go
new file mode 100644
index 0000000..0afd18e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/doc.go
@@ -0,0 +1,11 @@
+// Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
+//
+// The primary interfaces are Frontend and Backend. They correspond to a client and server respectively. Messages are
+// sent with Send (or a specialized Send variant). Messages are automatically buffered to minimize small writes. Call
+// Flush to ensure a message has actually been sent.
+//
+// The Trace method of Frontend and Backend can be used to examine the wire-level message traffic. It outputs in a
+// similar format to the PQtrace function in libpq.
+//
+// See https://www.postgresql.org/docs/current/protocol-message-formats.html for meanings of the different messages.
+package pgproto3
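
A minimal end-to-end sketch of the Send/Flush/Receive cycle the comment describes, assuming a local server that accepts the "postgres" user with trust authentication. Real applications should use pgconn, which layers startup and authentication on top of this package.

	package main

	import (
		"fmt"
		"net"

		"github.com/jackc/pgx/v5/pgproto3"
	)

	func main() {
		conn, err := net.Dial("tcp", "127.0.0.1:5432")
		if err != nil {
			panic(err)
		}
		defer conn.Close()

		f := pgproto3.NewFrontend(conn, conn)

		// Startup, then drain server messages until it reports ready.
		f.Send(&pgproto3.StartupMessage{
			ProtocolVersion: pgproto3.ProtocolVersionNumber,
			Parameters:      map[string]string{"user": "postgres"},
		})
		if err := f.Flush(); err != nil {
			panic(err)
		}
		waitReady(f)

		// Simple query protocol: one Query message, then read until ReadyForQuery.
		f.Send(&pgproto3.Query{String: "select 1"})
		if err := f.Flush(); err != nil {
			panic(err)
		}
		for {
			msg, err := f.Receive()
			if err != nil {
				panic(err)
			}
			switch m := msg.(type) {
			case *pgproto3.DataRow:
				fmt.Printf("row: %q\n", m.Values)
			case *pgproto3.ErrorResponse:
				fmt.Printf("error: %s\n", m.Message)
			case *pgproto3.ReadyForQuery:
				return
			}
		}
	}

	func waitReady(f *pgproto3.Frontend) {
		for {
			msg, err := f.Receive()
			if err != nil {
				panic(err)
			}
			if _, ok := msg.(*pgproto3.ReadyForQuery); ok {
				return
			}
		}
	}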
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go
new file mode 100644
index 0000000..cb6cca0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/empty_query_response.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type EmptyQueryResponse struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*EmptyQueryResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *EmptyQueryResponse) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "EmptyQueryResponse", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *EmptyQueryResponse) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'I', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src EmptyQueryResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "EmptyQueryResponse",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go
new file mode 100644
index 0000000..6ef9bd0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/error_response.go
@@ -0,0 +1,326 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+)
+
+type ErrorResponse struct {
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ErrorResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ErrorResponse) Decode(src []byte) error {
+ *dst = ErrorResponse{}
+
+ buf := bytes.NewBuffer(src)
+
+ for {
+ k, err := buf.ReadByte()
+ if err != nil {
+ return err
+ }
+ if k == 0 {
+ break
+ }
+
+ vb, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ v := string(vb[:len(vb)-1])
+
+ switch k {
+ case 'S':
+ dst.Severity = v
+ case 'V':
+ dst.SeverityUnlocalized = v
+ case 'C':
+ dst.Code = v
+ case 'M':
+ dst.Message = v
+ case 'D':
+ dst.Detail = v
+ case 'H':
+ dst.Hint = v
+ case 'P':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Position = int32(n)
+ case 'p':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.InternalPosition = int32(n)
+ case 'q':
+ dst.InternalQuery = v
+ case 'W':
+ dst.Where = v
+ case 's':
+ dst.SchemaName = v
+ case 't':
+ dst.TableName = v
+ case 'c':
+ dst.ColumnName = v
+ case 'd':
+ dst.DataTypeName = v
+ case 'n':
+ dst.ConstraintName = v
+ case 'F':
+ dst.File = v
+ case 'L':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Line = int32(n)
+ case 'R':
+ dst.Routine = v
+
+ default:
+ if dst.UnknownFields == nil {
+ dst.UnknownFields = make(map[byte]string)
+ }
+ dst.UnknownFields[k] = v
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ErrorResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'E')
+ dst = src.appendFields(dst)
+ return finishMessage(dst, sp)
+}
+
+func (src *ErrorResponse) appendFields(dst []byte) []byte {
+ if src.Severity != "" {
+ dst = append(dst, 'S')
+ dst = append(dst, src.Severity...)
+ dst = append(dst, 0)
+ }
+ if src.SeverityUnlocalized != "" {
+ dst = append(dst, 'V')
+ dst = append(dst, src.SeverityUnlocalized...)
+ dst = append(dst, 0)
+ }
+ if src.Code != "" {
+ dst = append(dst, 'C')
+ dst = append(dst, src.Code...)
+ dst = append(dst, 0)
+ }
+ if src.Message != "" {
+ dst = append(dst, 'M')
+ dst = append(dst, src.Message...)
+ dst = append(dst, 0)
+ }
+ if src.Detail != "" {
+ dst = append(dst, 'D')
+ dst = append(dst, src.Detail...)
+ dst = append(dst, 0)
+ }
+ if src.Hint != "" {
+ dst = append(dst, 'H')
+ dst = append(dst, src.Hint...)
+ dst = append(dst, 0)
+ }
+ if src.Position != 0 {
+ dst = append(dst, 'P')
+ dst = append(dst, strconv.Itoa(int(src.Position))...)
+ dst = append(dst, 0)
+ }
+ if src.InternalPosition != 0 {
+ dst = append(dst, 'p')
+ dst = append(dst, strconv.Itoa(int(src.InternalPosition))...)
+ dst = append(dst, 0)
+ }
+ if src.InternalQuery != "" {
+ dst = append(dst, 'q')
+ dst = append(dst, src.InternalQuery...)
+ dst = append(dst, 0)
+ }
+ if src.Where != "" {
+ dst = append(dst, 'W')
+ dst = append(dst, src.Where...)
+ dst = append(dst, 0)
+ }
+ if src.SchemaName != "" {
+ dst = append(dst, 's')
+ dst = append(dst, src.SchemaName...)
+ dst = append(dst, 0)
+ }
+ if src.TableName != "" {
+ dst = append(dst, 't')
+ dst = append(dst, src.TableName...)
+ dst = append(dst, 0)
+ }
+ if src.ColumnName != "" {
+ dst = append(dst, 'c')
+ dst = append(dst, src.ColumnName...)
+ dst = append(dst, 0)
+ }
+ if src.DataTypeName != "" {
+ dst = append(dst, 'd')
+ dst = append(dst, src.DataTypeName...)
+ dst = append(dst, 0)
+ }
+ if src.ConstraintName != "" {
+ dst = append(dst, 'n')
+ dst = append(dst, src.ConstraintName...)
+ dst = append(dst, 0)
+ }
+ if src.File != "" {
+ dst = append(dst, 'F')
+ dst = append(dst, src.File...)
+ dst = append(dst, 0)
+ }
+ if src.Line != 0 {
+ dst = append(dst, 'L')
+ dst = append(dst, strconv.Itoa(int(src.Line))...)
+ dst = append(dst, 0)
+ }
+ if src.Routine != "" {
+ dst = append(dst, 'R')
+ dst = append(dst, src.Routine...)
+ dst = append(dst, 0)
+ }
+
+ for k, v := range src.UnknownFields {
+ dst = append(dst, k)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+
+ dst = append(dst, 0)
+
+ return dst
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ErrorResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+ }{
+ Type: "ErrorResponse",
+ Severity: src.Severity,
+ SeverityUnlocalized: src.SeverityUnlocalized,
+ Code: src.Code,
+ Message: src.Message,
+ Detail: src.Detail,
+ Hint: src.Hint,
+ Position: src.Position,
+ InternalPosition: src.InternalPosition,
+ InternalQuery: src.InternalQuery,
+ Where: src.Where,
+ SchemaName: src.SchemaName,
+ TableName: src.TableName,
+ ColumnName: src.ColumnName,
+ DataTypeName: src.DataTypeName,
+ ConstraintName: src.ConstraintName,
+ File: src.File,
+ Line: src.Line,
+ Routine: src.Routine,
+ UnknownFields: src.UnknownFields,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *ErrorResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Type string
+ Severity string
+ SeverityUnlocalized string // only in 9.6 and greater
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+
+ dst.Severity = msg.Severity
+ dst.SeverityUnlocalized = msg.SeverityUnlocalized
+ dst.Code = msg.Code
+ dst.Message = msg.Message
+ dst.Detail = msg.Detail
+ dst.Hint = msg.Hint
+ dst.Position = msg.Position
+ dst.InternalPosition = msg.InternalPosition
+ dst.InternalQuery = msg.InternalQuery
+ dst.Where = msg.Where
+ dst.SchemaName = msg.SchemaName
+ dst.TableName = msg.TableName
+ dst.ColumnName = msg.ColumnName
+ dst.DataTypeName = msg.DataTypeName
+ dst.ConstraintName = msg.ConstraintName
+ dst.File = msg.File
+ dst.Line = msg.Line
+ dst.Routine = msg.Routine
+
+ dst.UnknownFields = msg.UnknownFields
+
+ return nil
+}
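
A short sketch of the field layout Decode walks (the bytes are hypothetical): each field is a one byte code followed by a NUL-terminated string, and a lone zero byte terminates the list.

	raw := []byte("SERROR\x00C42P01\x00Mrelation \"t\" does not exist\x00\x00")
	var er pgproto3.ErrorResponse
	if err := er.Decode(raw); err == nil {
		// er.Severity == "ERROR", er.Code == "42P01",
		// er.Message == `relation "t" does not exist`
	}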
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go b/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go
new file mode 100644
index 0000000..31bc714
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/execute.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Execute struct {
+ Portal string
+ MaxRows uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Execute) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Execute) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Portal = string(b[:len(b)-1])
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Execute"}
+ }
+ dst.MaxRows = binary.BigEndian.Uint32(buf.Next(4))
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Execute) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'E')
+ dst = append(dst, src.Portal...)
+ dst = append(dst, 0)
+ dst = pgio.AppendUint32(dst, src.MaxRows)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Execute) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Portal string
+ MaxRows uint32
+ }{
+ Type: "Execute",
+ Portal: src.Portal,
+ MaxRows: src.MaxRows,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go b/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go
new file mode 100644
index 0000000..e5dc1fb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/flush.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Flush struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Flush) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Flush) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Flush", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Flush) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'H', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Flush) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Flush",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go b/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go
new file mode 100644
index 0000000..b41abbe
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go
@@ -0,0 +1,454 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Frontend acts as a client for the PostgreSQL wire protocol version 3.
+type Frontend struct {
+ cr *chunkReader
+ w io.Writer
+
+ // tracer is used to trace messages when Send or Receive is called. This means an outbound message is traced
+ // before it is actually transmitted (i.e. before Flush). It is safe to change this variable when the Frontend is
+ // idle. Setting and unsetting tracer provides equivalent functionality to PQtrace and PQuntrace in libpq.
+ tracer *tracer
+
+ wbuf []byte
+ encodeError error
+
+ // Backend message flyweights
+ authenticationOk AuthenticationOk
+ authenticationCleartextPassword AuthenticationCleartextPassword
+ authenticationMD5Password AuthenticationMD5Password
+ authenticationGSS AuthenticationGSS
+ authenticationGSSContinue AuthenticationGSSContinue
+ authenticationSASL AuthenticationSASL
+ authenticationSASLContinue AuthenticationSASLContinue
+ authenticationSASLFinal AuthenticationSASLFinal
+ backendKeyData BackendKeyData
+ bindComplete BindComplete
+ closeComplete CloseComplete
+ commandComplete CommandComplete
+ copyBothResponse CopyBothResponse
+ copyData CopyData
+ copyInResponse CopyInResponse
+ copyOutResponse CopyOutResponse
+ copyDone CopyDone
+ dataRow DataRow
+ emptyQueryResponse EmptyQueryResponse
+ errorResponse ErrorResponse
+ functionCallResponse FunctionCallResponse
+ noData NoData
+ noticeResponse NoticeResponse
+ notificationResponse NotificationResponse
+ parameterDescription ParameterDescription
+ parameterStatus ParameterStatus
+ parseComplete ParseComplete
+ readyForQuery ReadyForQuery
+ rowDescription RowDescription
+ portalSuspended PortalSuspended
+
+ bodyLen int
+ msgType byte
+ partialMsg bool
+ authType uint32
+}
+
+// NewFrontend creates a new Frontend.
+func NewFrontend(r io.Reader, w io.Writer) *Frontend {
+ cr := newChunkReader(r, 0)
+ return &Frontend{cr: cr, w: w}
+}
+
+// Send sends a message to the backend (i.e. the server). The message is buffered until Flush is called. Any error
+// encountered will be returned from Flush.
+//
+// Send can work with any FrontendMessage. Some commonly used message types such as Bind have specialized send methods
+// such as SendBind. These methods should be preferred when the type of message is known up front (e.g. when building an
+// extended query protocol query) as they may be faster due to knowing the type of msg rather than it being hidden
+// behind an interface.
+func (f *Frontend) Send(msg FrontendMessage) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceMessage('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// Flush writes any pending messages to the backend (i.e. the server).
+func (f *Frontend) Flush() error {
+ if err := f.encodeError; err != nil {
+ f.encodeError = nil
+ f.wbuf = f.wbuf[:0]
+ return &writeError{err: err, safeToRetry: true}
+ }
+
+ if len(f.wbuf) == 0 {
+ return nil
+ }
+
+ n, err := f.w.Write(f.wbuf)
+
+ const maxLen = 1024
+ if len(f.wbuf) > maxLen {
+ f.wbuf = make([]byte, 0, maxLen)
+ } else {
+ f.wbuf = f.wbuf[:0]
+ }
+
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ return nil
+}
+
+// Trace starts tracing the message traffic to w. It writes in a similar format to that produced by the libpq function
+// PQtrace.
+func (f *Frontend) Trace(w io.Writer, options TracerOptions) {
+ f.tracer = &tracer{
+ w: w,
+ buf: &bytes.Buffer{},
+ TracerOptions: options,
+ }
+}
+
+// Untrace stops tracing.
+func (f *Frontend) Untrace() {
+ f.tracer = nil
+}
+
+// SendBind sends a Bind message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendBind(msg *Bind) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceBind('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendParse sends a Parse message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendParse(msg *Parse) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceParse('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendClose sends a Close message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendClose(msg *Close) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceClose('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendDescribe sends a Describe message to the backend (i.e. the server). The message is buffered until Flush is
+// called. Any error encountered will be returned from Flush.
+func (f *Frontend) SendDescribe(msg *Describe) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceDescribe('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendExecute sends an Execute message to the backend (i.e. the server). The message is buffered until Flush is called.
+// Any error encountered will be returned from Flush.
+func (f *Frontend) SendExecute(msg *Execute) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceExecute('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendSync sends a Sync message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendSync(msg *Sync) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceSync('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendQuery sends a Query message to the backend (i.e. the server). The message is buffered until Flush is called. Any
+// error encountered will be returned from Flush.
+func (f *Frontend) SendQuery(msg *Query) {
+ if f.encodeError != nil {
+ return
+ }
+
+ prevLen := len(f.wbuf)
+ newBuf, err := msg.Encode(f.wbuf)
+ if err != nil {
+ f.encodeError = err
+ return
+ }
+ f.wbuf = newBuf
+
+ if f.tracer != nil {
+ f.tracer.traceQuery('F', int32(len(f.wbuf)-prevLen), msg)
+ }
+}
+
+// SendUnbufferedEncodedCopyData immediately sends an encoded CopyData message to the backend (i.e. the server). This method
+// is more efficient than sending a CopyData message with Send, as the message data is not copied to the internal buffer
+// before being written out. The internal buffer is flushed before the message is sent.
+func (f *Frontend) SendUnbufferedEncodedCopyData(msg []byte) error {
+ err := f.Flush()
+ if err != nil {
+ return err
+ }
+
+ n, err := f.w.Write(msg)
+ if err != nil {
+ return &writeError{err: err, safeToRetry: n == 0}
+ }
+
+ if f.tracer != nil {
+ f.tracer.traceCopyData('F', int32(len(msg)-1), &CopyData{})
+ }
+
+ return nil
+}
+
+func translateEOFtoErrUnexpectedEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+// Receive receives a message from the backend. The returned message is only valid until the next call to Receive.
+func (f *Frontend) Receive() (BackendMessage, error) {
+ if !f.partialMsg {
+ header, err := f.cr.Next(5)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ f.msgType = header[0]
+
+ msgLength := int(binary.BigEndian.Uint32(header[1:]))
+ if msgLength < 4 {
+ return nil, fmt.Errorf("invalid message length: %d", msgLength)
+ }
+
+ f.bodyLen = msgLength - 4
+ f.partialMsg = true
+ }
+
+ msgBody, err := f.cr.Next(f.bodyLen)
+ if err != nil {
+ return nil, translateEOFtoErrUnexpectedEOF(err)
+ }
+
+ f.partialMsg = false
+
+ var msg BackendMessage
+ switch f.msgType {
+ case '1':
+ msg = &f.parseComplete
+ case '2':
+ msg = &f.bindComplete
+ case '3':
+ msg = &f.closeComplete
+ case 'A':
+ msg = &f.notificationResponse
+ case 'c':
+ msg = &f.copyDone
+ case 'C':
+ msg = &f.commandComplete
+ case 'd':
+ msg = &f.copyData
+ case 'D':
+ msg = &f.dataRow
+ case 'E':
+ msg = &f.errorResponse
+ case 'G':
+ msg = &f.copyInResponse
+ case 'H':
+ msg = &f.copyOutResponse
+ case 'I':
+ msg = &f.emptyQueryResponse
+ case 'K':
+ msg = &f.backendKeyData
+ case 'n':
+ msg = &f.noData
+ case 'N':
+ msg = &f.noticeResponse
+ case 'R':
+ var err error
+ msg, err = f.findAuthenticationMessageType(msgBody)
+ if err != nil {
+ return nil, err
+ }
+ case 's':
+ msg = &f.portalSuspended
+ case 'S':
+ msg = &f.parameterStatus
+ case 't':
+ msg = &f.parameterDescription
+ case 'T':
+ msg = &f.rowDescription
+ case 'V':
+ msg = &f.functionCallResponse
+ case 'W':
+ msg = &f.copyBothResponse
+ case 'Z':
+ msg = &f.readyForQuery
+ default:
+ return nil, fmt.Errorf("unknown message type: %c", f.msgType)
+ }
+
+ err = msg.Decode(msgBody)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.tracer != nil {
+ f.tracer.traceMessage('B', int32(5+len(msgBody)), msg)
+ }
+
+ return msg, nil
+}
+
+// Authentication message type constants.
+// See src/include/libpq/pqcomm.h for all
+// constants.
+const (
+ AuthTypeOk = 0
+ AuthTypeCleartextPassword = 3
+ AuthTypeMD5Password = 5
+ AuthTypeSCMCreds = 6
+ AuthTypeGSS = 7
+ AuthTypeGSSCont = 8
+ AuthTypeSSPI = 9
+ AuthTypeSASL = 10
+ AuthTypeSASLContinue = 11
+ AuthTypeSASLFinal = 12
+)
+
+func (f *Frontend) findAuthenticationMessageType(src []byte) (BackendMessage, error) {
+ if len(src) < 4 {
+ return nil, errors.New("authentication message too short")
+ }
+ f.authType = binary.BigEndian.Uint32(src[:4])
+
+ switch f.authType {
+ case AuthTypeOk:
+ return &f.authenticationOk, nil
+ case AuthTypeCleartextPassword:
+ return &f.authenticationCleartextPassword, nil
+ case AuthTypeMD5Password:
+ return &f.authenticationMD5Password, nil
+ case AuthTypeSCMCreds:
+ return nil, errors.New("AuthTypeSCMCreds is unimplemented")
+ case AuthTypeGSS:
+ return &f.authenticationGSS, nil
+ case AuthTypeGSSCont:
+ return &f.authenticationGSSContinue, nil
+ case AuthTypeSSPI:
+ return nil, errors.New("AuthTypeSSPI is unimplemented")
+ case AuthTypeSASL:
+ return &f.authenticationSASL, nil
+ case AuthTypeSASLContinue:
+ return &f.authenticationSASLContinue, nil
+ case AuthTypeSASLFinal:
+ return &f.authenticationSASLFinal, nil
+ default:
+ return nil, fmt.Errorf("unknown authentication type: %d", f.authType)
+ }
+}
+
+// GetAuthType returns the authentication type from the most recent authentication request message
+// received by the frontend (see the AuthType* constants above).
+func (f *Frontend) GetAuthType() uint32 {
+ return f.authType
+}
+
+func (f *Frontend) ReadBufferLen() int {
+ return f.cr.wp - f.cr.rp
+}
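
A sketch of one extended-protocol round trip using the specialized senders above; nothing hits the socket until Flush. The statement and parameter value are illustrative.

	f.SendParse(&pgproto3.Parse{Query: "select $1::int"})
	f.SendBind(&pgproto3.Bind{Parameters: [][]byte{[]byte("42")}})
	f.SendDescribe(&pgproto3.Describe{ObjectType: 'P'})
	f.SendExecute(&pgproto3.Execute{}) // MaxRows 0 = no row limit
	f.SendSync(&pgproto3.Sync{})
	if err := f.Flush(); err != nil {
		// a failed Encode from any Send above also surfaces here
	}
	// then call f.Receive() until a *pgproto3.ReadyForQuery arrives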
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go
new file mode 100644
index 0000000..7d83579
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call.go
@@ -0,0 +1,102 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type FunctionCall struct {
+ Function uint32
+ ArgFormatCodes []uint16
+ Arguments [][]byte
+ ResultFormatCode uint16
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*FunctionCall) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *FunctionCall) Decode(src []byte) error {
+ *dst = FunctionCall{}
+ rp := 0
+ // Specifies the object ID of the function to call.
+ dst.Function = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ // The number of argument format codes that follow (denoted C below).
+ // This can be zero to indicate that there are no arguments or that the arguments all use the default format (text);
+ // or one, in which case the specified format code is applied to all arguments;
+ // or it can equal the actual number of arguments.
+ nArgumentCodes := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ argumentCodes := make([]uint16, nArgumentCodes)
+ for i := 0; i < nArgumentCodes; i++ {
+ // The argument format codes. Each must presently be zero (text) or one (binary).
+ ac := binary.BigEndian.Uint16(src[rp:])
+ if ac != 0 && ac != 1 {
+ return &invalidMessageFormatErr{messageType: "FunctionCall"}
+ }
+ argumentCodes[i] = ac
+ rp += 2
+ }
+ dst.ArgFormatCodes = argumentCodes
+
+ // Specifies the number of arguments being supplied to the function.
+ nArguments := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ arguments := make([][]byte, nArguments)
+ for i := 0; i < nArguments; i++ {
+ // The length of the argument value, in bytes (this count does not include itself). Can be zero.
+ // As a special case, -1 indicates a NULL argument value. No value bytes follow in the NULL case.
+ argumentLength := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ if argumentLength == -1 {
+ arguments[i] = nil
+ } else {
+ // The value of the argument, in the format indicated by the associated format code. n is the above length.
+ argumentValue := src[rp : rp+argumentLength]
+ rp += argumentLength
+ arguments[i] = argumentValue
+ }
+ }
+ dst.Arguments = arguments
+ // The format code for the function result. Must presently be zero (text) or one (binary).
+ resultFormatCode := binary.BigEndian.Uint16(src[rp:])
+ if resultFormatCode != 0 && resultFormatCode != 1 {
+ return &invalidMessageFormatErr{messageType: "FunctionCall"}
+ }
+ dst.ResultFormatCode = resultFormatCode
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *FunctionCall) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'F')
+ dst = pgio.AppendUint32(dst, src.Function)
+
+ if len(src.ArgFormatCodes) > math.MaxUint16 {
+ return nil, errors.New("too many arg format codes")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ArgFormatCodes)))
+ for _, argFormatCode := range src.ArgFormatCodes {
+ dst = pgio.AppendUint16(dst, argFormatCode)
+ }
+
+ if len(src.Arguments) > math.MaxUint16 {
+ return nil, errors.New("too many arguments")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Arguments)))
+ for _, argument := range src.Arguments {
+ if argument == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(argument)))
+ dst = append(dst, argument...)
+ }
+ }
+ dst = pgio.AppendUint16(dst, src.ResultFormatCode)
+ return finishMessage(dst, sp)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go
new file mode 100644
index 0000000..1f27349
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/function_call_response.go
@@ -0,0 +1,97 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type FunctionCallResponse struct {
+ Result []byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*FunctionCallResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *FunctionCallResponse) Decode(src []byte) error {
+ if len(src) < 4 {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+ rp := 0
+ resultSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ if resultSize == -1 {
+ dst.Result = nil
+ return nil
+ }
+
+ if len(src[rp:]) != resultSize {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+
+ dst.Result = src[rp:]
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *FunctionCallResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'V')
+
+ if src.Result == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(src.Result)))
+ dst = append(dst, src.Result...)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src FunctionCallResponse) MarshalJSON() ([]byte, error) {
+ var formattedValue map[string]string
+ var hasNonPrintable bool
+ for _, b := range src.Result {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValue = map[string]string{"binary": hex.EncodeToString(src.Result)}
+ } else {
+ formattedValue = map[string]string{"text": string(src.Result)}
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Result map[string]string
+ }{
+ Type: "FunctionCallResponse",
+ Result: formattedValue,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *FunctionCallResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ Result map[string]string
+ }
+ err := json.Unmarshal(data, &msg)
+ if err != nil {
+ return err
+ }
+ dst.Result, err = getValueFromJSON(msg.Result)
+ return err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go
new file mode 100644
index 0000000..70cb20c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_enc_request.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const gssEncReqNumber = 80877104
+
+type GSSEncRequest struct {
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*GSSEncRequest) Frontend() {}
+
+func (dst *GSSEncRequest) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("gss encoding request too short")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != gssEncReqNumber {
+ return errors.New("bad gss encoding request code")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *GSSEncRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 8)
+ dst = pgio.AppendInt32(dst, gssEncReqNumber)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src GSSEncRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "GSSEncRequest",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go
new file mode 100644
index 0000000..10d9377
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/gss_response.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type GSSResponse struct {
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (g *GSSResponse) Frontend() {}
+
+func (g *GSSResponse) Decode(data []byte) error {
+ g.Data = data
+ return nil
+}
+
+func (g *GSSResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, g.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (g *GSSResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data []byte
+ }{
+ Type: "GSSResponse",
+ Data: g.Data,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (g *GSSResponse) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Data []byte
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ g.Data = msg.Data
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go b/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go
new file mode 100644
index 0000000..cbcaad4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/no_data.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type NoData struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NoData) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NoData) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "NoData", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NoData) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'n', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src NoData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "NoData",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go
new file mode 100644
index 0000000..497aba6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/notice_response.go
@@ -0,0 +1,19 @@
+package pgproto3
+
+type NoticeResponse ErrorResponse
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NoticeResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NoticeResponse) Decode(src []byte) error {
+ return (*ErrorResponse)(dst).Decode(src)
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NoticeResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'N')
+ dst = (*ErrorResponse)(src).appendFields(dst)
+ return finishMessage(dst, sp)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go
new file mode 100644
index 0000000..243b6bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/notification_response.go
@@ -0,0 +1,71 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type NotificationResponse struct {
+ PID uint32
+ Channel string
+ Payload string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*NotificationResponse) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *NotificationResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "NotificationResponse", details: "too short"}
+ }
+
+ pid := binary.BigEndian.Uint32(buf.Next(4))
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ channel := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ payload := string(b[:len(b)-1])
+
+ *dst = NotificationResponse{PID: pid, Channel: channel, Payload: payload}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *NotificationResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'A')
+ dst = pgio.AppendUint32(dst, src.PID)
+ dst = append(dst, src.Channel...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Payload...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src NotificationResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ PID uint32
+ Channel string
+ Payload string
+ }{
+ Type: "NotificationResponse",
+ PID: src.PID,
+ Channel: src.Channel,
+ Payload: src.Payload,
+ })
+}
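
Sketch of the receiving side: an asynchronous NOTIFY arrives as message type 'A' and surfaces from Receive as this struct. The handling below is illustrative.

	msg, err := f.Receive()
	if err != nil {
		return err
	}
	if n, ok := msg.(*pgproto3.NotificationResponse); ok {
		log.Printf("notify on %q from pid %d: %s", n.Channel, n.PID, n.Payload)
	}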
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go
new file mode 100644
index 0000000..1ef27b7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_description.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type ParameterDescription struct {
+ ParameterOIDs []uint32
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParameterDescription) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParameterDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "ParameterDescription"}
+ }
+
+ // The reported parameter count will be incorrect when the number of args exceeds the uint16 maximum,
+ buf.Next(2)
+ // so instead infer the parameter count from the remaining size of the message.
+ parameterCount := buf.Len() / 4
+
+ *dst = ParameterDescription{ParameterOIDs: make([]uint32, parameterCount)}
+
+ for i := 0; i < parameterCount; i++ {
+ dst.ParameterOIDs[i] = binary.BigEndian.Uint32(buf.Next(4))
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParameterDescription) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 't')
+
+ if len(src.ParameterOIDs) > math.MaxUint16 {
+ return nil, errors.New("too many parameter oids")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ParameterDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ParameterOIDs []uint32
+ }{
+ Type: "ParameterDescription",
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go
new file mode 100644
index 0000000..9ee0720
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parameter_status.go
@@ -0,0 +1,58 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type ParameterStatus struct {
+ Name string
+ Value string
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParameterStatus) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParameterStatus) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ name := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ value := string(b[:len(b)-1])
+
+ *dst = ParameterStatus{Name: name, Value: value}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParameterStatus) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'S')
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Value...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (ps ParameterStatus) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Value string
+ }{
+ Type: "ParameterStatus",
+ Name: ps.Name,
+ Value: ps.Value,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go
new file mode 100644
index 0000000..6ba3486
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parse.go
@@ -0,0 +1,89 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Parse struct {
+ Name string
+ Query string
+ ParameterOIDs []uint32
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Parse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Parse) Decode(src []byte) error {
+ *dst = Parse{}
+
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Name = string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Query = string(b[:len(b)-1])
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ parameterOIDCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ for i := 0; i < parameterOIDCount; i++ {
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ dst.ParameterOIDs = append(dst.ParameterOIDs, binary.BigEndian.Uint32(buf.Next(4)))
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Parse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'P')
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Query...)
+ dst = append(dst, 0)
+
+ if len(src.ParameterOIDs) > math.MaxUint16 {
+ return nil, errors.New("too many parameter oids")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Parse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Query string
+ ParameterOIDs []uint32
+ }{
+ Type: "Parse",
+ Name: src.Name,
+ Query: src.Query,
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go b/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go
new file mode 100644
index 0000000..cff9e27
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/parse_complete.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ParseComplete struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ParseComplete) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ParseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "ParseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ParseComplete) Encode(dst []byte) ([]byte, error) {
+ return append(dst, '1', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ParseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "ParseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go b/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go
new file mode 100644
index 0000000..d820d32
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/password_message.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type PasswordMessage struct {
+ Password string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*PasswordMessage) Frontend() {}
+
+// InitialResponse identifies this message as an authentication response.
+func (*PasswordMessage) InitialResponse() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *PasswordMessage) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Password = string(b[:len(b)-1])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *PasswordMessage) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, src.Password...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src PasswordMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Password string
+ }{
+ Type: "PasswordMessage",
+ Password: src.Password,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go
new file mode 100644
index 0000000..128f97f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/pgproto3.go
@@ -0,0 +1,120 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// maxMessageBodyLen is the maximum length of a message body in bytes. See PG_LARGE_MESSAGE_LIMIT in the PostgreSQL
+// source. It is defined as (MaxAllocSize - 1). MaxAllocSize is defined as 0x3fffffff.
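+// That works out to 0x3ffffffe (2^30 - 2) bytes.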
+const maxMessageBodyLen = (0x3fffffff - 1)
+
+// Message is the interface implemented by an object that can decode and encode
+// a particular PostgreSQL message.
+type Message interface {
+ // Decode is allowed and expected to retain a reference to data after
+ // returning (unlike encoding.BinaryUnmarshaler).
+ Decode(data []byte) error
+
+ // Encode appends itself to dst and returns the new buffer.
+ Encode(dst []byte) ([]byte, error)
+}
+
+// FrontendMessage is a message sent by the frontend (i.e. the client).
+type FrontendMessage interface {
+ Message
+ Frontend() // no-op method to distinguish frontend from backend methods
+}
+
+// BackendMessage is a message sent by the backend (i.e. the server).
+type BackendMessage interface {
+ Message
+ Backend() // no-op method to distinguish frontend from backend methods
+}
+
+type AuthenticationResponseMessage interface {
+ BackendMessage
+ AuthenticationResponse() // no-op method to distinguish authentication responses
+}
+
+type invalidMessageLenErr struct {
+ messageType string
+ expectedLen int
+ actualLen int
+}
+
+func (e *invalidMessageLenErr) Error() string {
+ return fmt.Sprintf("%s body must have length of %d, but it is %d", e.messageType, e.expectedLen, e.actualLen)
+}
+
+type invalidMessageFormatErr struct {
+ messageType string
+ details string
+}
+
+func (e *invalidMessageFormatErr) Error() string {
+ if e.details == "" {
+ return fmt.Sprintf("%s body is invalid", e.messageType)
+ }
+ return fmt.Sprintf("%s body is invalid: %s", e.messageType, e.details)
+}
+
+type writeError struct {
+ err error
+ safeToRetry bool
+}
+
+func (e *writeError) Error() string {
+ return fmt.Sprintf("write failed: %s", e.err.Error())
+}
+
+func (e *writeError) SafeToRetry() bool {
+ return e.safeToRetry
+}
+
+func (e *writeError) Unwrap() error {
+ return e.err
+}
+
+type ExceededMaxBodyLenErr struct {
+ MaxExpectedBodyLen int
+ ActualBodyLen int
+}
+
+func (e *ExceededMaxBodyLenErr) Error() string {
+ return fmt.Sprintf("invalid body length: expected at most %d, but got %d", e.MaxExpectedBodyLen, e.ActualBodyLen)
+}
+
+// getValueFromJSON gets the value from a protocol message representation in JSON.
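+// For example, {"text": "abc"} yields []byte("abc"), while {"binary": "6162"}
+// yields the hex-decoded bytes 0x61 0x62.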
+func getValueFromJSON(v map[string]string) ([]byte, error) {
+ if v == nil {
+ return nil, nil
+ }
+ if text, ok := v["text"]; ok {
+ return []byte(text), nil
+ }
+ if binary, ok := v["binary"]; ok {
+ return hex.DecodeString(binary)
+ }
+ return nil, errors.New("unknown protocol representation")
+}
+
+// beginMessage begins a new message of type t. It appends the message type and a placeholder for the message length to
+// dst. It returns the new buffer and the position of the message length placeholder.
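+//
+// A sketch of the intended pairing with finishMessage, mirroring Query.Encode
+// (message type byte 'Q'):
+//
+//   dst, sp := beginMessage(dst, 'Q')
+//   dst = append(dst, "SELECT 1"...)
+//   dst = append(dst, 0)
+//   return finishMessage(dst, sp)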
+func beginMessage(dst []byte, t byte) ([]byte, int) {
+ dst = append(dst, t)
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+ return dst, sp
+}
+
+// finishMessage finishes a message that was started with beginMessage. It computes the message length and writes it to
+// dst[sp]. If the message length is too large it returns an error. Otherwise it returns the final message buffer.
+func finishMessage(dst []byte, sp int) ([]byte, error) {
+ messageBodyLen := len(dst[sp:])
+ if messageBodyLen > maxMessageBodyLen {
+ return nil, errors.New("message body too large")
+ }
+ pgio.SetInt32(dst[sp:], int32(messageBodyLen))
+ return dst, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go b/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go
new file mode 100644
index 0000000..9e2f8cb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/portal_suspended.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type PortalSuspended struct{}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*PortalSuspended) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *PortalSuspended) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "PortalSuspended", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *PortalSuspended) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 's', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src PortalSuspended) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "PortalSuspended",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/query.go b/vendor/github.com/jackc/pgx/v5/pgproto3/query.go
new file mode 100644
index 0000000..aebdfde
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/query.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+)
+
+type Query struct {
+ String string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Query) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Query) Decode(src []byte) error {
+ i := bytes.IndexByte(src, 0)
+ if i != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "Query"}
+ }
+
+ dst.String = string(src[:i])
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Query) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'Q')
+ dst = append(dst, src.String...)
+ dst = append(dst, 0)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Query) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ String string
+ }{
+ Type: "Query",
+ String: src.String,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go b/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go
new file mode 100644
index 0000000..a56af9f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/ready_for_query.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+type ReadyForQuery struct {
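+ // TxStatus is 'I' when idle, 'T' when in a transaction block, or 'E' when in
+ // a failed transaction block, per the PostgreSQL protocol documentation.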
+ TxStatus byte
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*ReadyForQuery) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *ReadyForQuery) Decode(src []byte) error {
+ if len(src) != 1 {
+ return &invalidMessageLenErr{messageType: "ReadyForQuery", expectedLen: 1, actualLen: len(src)}
+ }
+
+ dst.TxStatus = src[0]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *ReadyForQuery) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'Z', 0, 0, 0, 5, src.TxStatus), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src ReadyForQuery) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ TxStatus string
+ }{
+ Type: "ReadyForQuery",
+ TxStatus: string(src.TxStatus),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *ReadyForQuery) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ TxStatus string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if len(msg.TxStatus) != 1 {
+ return errors.New("invalid length for ReadyForQuery.TxStatus")
+ }
+ dst.TxStatus = msg.TxStatus[0]
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go b/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go
new file mode 100644
index 0000000..dc2a4dd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/row_description.go
@@ -0,0 +1,166 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "math"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const (
+ TextFormat = 0
+ BinaryFormat = 1
+)
+
+type FieldDescription struct {
+ Name []byte
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (fd FieldDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+ }{
+ Name: string(fd.Name),
+ TableOID: fd.TableOID,
+ TableAttributeNumber: fd.TableAttributeNumber,
+ DataTypeOID: fd.DataTypeOID,
+ DataTypeSize: fd.DataTypeSize,
+ TypeModifier: fd.TypeModifier,
+ Format: fd.Format,
+ })
+}
+
+type RowDescription struct {
+ Fields []FieldDescription
+}
+
+// Backend identifies this message as sendable by the PostgreSQL backend.
+func (*RowDescription) Backend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *RowDescription) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fieldCount := int(binary.BigEndian.Uint16(src))
+ rp := 2
+
+ dst.Fields = dst.Fields[0:0]
+
+ for i := 0; i < fieldCount; i++ {
+ var fd FieldDescription
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fd.Name = src[rp : rp+idx]
+ rp += idx + 1
+
+ // Check the remaining length before decoding the fixed-size fields so a
+ // truncated message fails cleanly instead of panicking.
+ if len(src[rp:]) < 18 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+
+ fd.TableOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ fd.TableAttributeNumber = binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ fd.DataTypeOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+ fd.DataTypeSize = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ fd.TypeModifier = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ fd.Format = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.Fields = append(dst.Fields, fd)
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *RowDescription) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'T')
+
+ if len(src.Fields) > math.MaxUint16 {
+ return nil, errors.New("too many fields")
+ }
+ dst = pgio.AppendUint16(dst, uint16(len(src.Fields)))
+ for _, fd := range src.Fields {
+ dst = append(dst, fd.Name...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, fd.TableOID)
+ dst = pgio.AppendUint16(dst, fd.TableAttributeNumber)
+ dst = pgio.AppendUint32(dst, fd.DataTypeOID)
+ dst = pgio.AppendInt16(dst, fd.DataTypeSize)
+ dst = pgio.AppendInt32(dst, fd.TypeModifier)
+ dst = pgio.AppendInt16(dst, fd.Format)
+ }
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src RowDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Fields []FieldDescription
+ }{
+ Type: "RowDescription",
+ Fields: src.Fields,
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *RowDescription) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Fields []struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier int32
+ Format int16
+ }
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ dst.Fields = make([]FieldDescription, len(msg.Fields))
+ for n, field := range msg.Fields {
+ dst.Fields[n] = FieldDescription{
+ Name: []byte(field.Name),
+ TableOID: field.TableOID,
+ TableAttributeNumber: field.TableAttributeNumber,
+ DataTypeOID: field.DataTypeOID,
+ DataTypeSize: field.DataTypeSize,
+ TypeModifier: field.TypeModifier,
+ Format: field.Format,
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go
new file mode 100644
index 0000000..9eb1b6a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_initial_response.go
@@ -0,0 +1,90 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type SASLInitialResponse struct {
+ AuthMechanism string
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SASLInitialResponse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *SASLInitialResponse) Decode(src []byte) error {
+ *dst = SASLInitialResponse{}
+
+ rp := 0
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return errors.New("invalid SASLInitialResponse")
+ }
+
+ dst.AuthMechanism = string(src[rp:idx])
+ rp = idx + 1
+
+ rp += 4 // The rest of the message is data so we can just skip the size
+ dst.Data = src[rp:]
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *SASLInitialResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+
+ dst = append(dst, []byte(src.AuthMechanism)...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendInt32(dst, int32(len(src.Data)))
+ dst = append(dst, src.Data...)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SASLInitialResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ AuthMechanism string
+ Data string
+ }{
+ Type: "SASLInitialResponse",
+ AuthMechanism: src.AuthMechanism,
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *SASLInitialResponse) UnmarshalJSON(data []byte) error {
+ // Ignore null, like in the main JSON package.
+ if string(data) == "null" {
+ return nil
+ }
+
+ var msg struct {
+ AuthMechanism string
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ dst.AuthMechanism = msg.AuthMechanism
+ if msg.Data != "" {
+ decoded, err := hex.DecodeString(msg.Data)
+ if err != nil {
+ return err
+ }
+ dst.Data = decoded
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go
new file mode 100644
index 0000000..1b604c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sasl_response.go
@@ -0,0 +1,56 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+)
+
+type SASLResponse struct {
+ Data []byte
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SASLResponse) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *SASLResponse) Decode(src []byte) error {
+ *dst = SASLResponse{Data: src}
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *SASLResponse) Encode(dst []byte) ([]byte, error) {
+ dst, sp := beginMessage(dst, 'p')
+ dst = append(dst, src.Data...)
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SASLResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "SASLResponse",
+ Data: string(src.Data),
+ })
+}
+
+// UnmarshalJSON implements encoding/json.Unmarshaler.
+func (dst *SASLResponse) UnmarshalJSON(data []byte) error {
+ var msg struct {
+ Data string
+ }
+ if err := json.Unmarshal(data, &msg); err != nil {
+ return err
+ }
+ if msg.Data != "" {
+ decoded, err := hex.DecodeString(msg.Data)
+ if err != nil {
+ return err
+ }
+ dst.Data = decoded
+ }
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go b/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go
new file mode 100644
index 0000000..b0fc284
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/ssl_request.go
@@ -0,0 +1,49 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const sslRequestNumber = 80877103
+
+type SSLRequest struct {
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*SSLRequest) Frontend() {}
+
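+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 4 byte message
+// length. Unlike most messages, SSLRequest has no 1 byte message type identifier.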
+func (dst *SSLRequest) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("ssl request too short")
+ }
+
+ requestCode := binary.BigEndian.Uint32(src)
+
+ if requestCode != sslRequestNumber {
+ return errors.New("bad ssl request code")
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 4 byte message length.
+func (src *SSLRequest) Encode(dst []byte) ([]byte, error) {
+ dst = pgio.AppendInt32(dst, 8)
+ dst = pgio.AppendInt32(dst, sslRequestNumber)
+ return dst, nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src SSLRequest) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "SSLRequest",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go b/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go
new file mode 100644
index 0000000..3af4587
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go
@@ -0,0 +1,94 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
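+// ProtocolVersionNumber encodes protocol version 3.0: the major version in the
+// high 16 bits and the minor version in the low 16 bits, so 3<<16 == 196608.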
+const ProtocolVersionNumber = 196608 // 3.0
+
+type StartupMessage struct {
+ ProtocolVersion uint32
+ Parameters map[string]string
+}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*StartupMessage) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *StartupMessage) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.New("startup message too short")
+ }
+
+ dst.ProtocolVersion = binary.BigEndian.Uint32(src)
+ rp := 4
+
+ if dst.ProtocolVersion != ProtocolVersionNumber {
+ return fmt.Errorf("bad startup message version number: expected %d, got %d", ProtocolVersionNumber, dst.ProtocolVersion)
+ }
+
+ dst.Parameters = make(map[string]string)
+ for {
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMessage"}
+ }
+ key := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMessage"}
+ }
+ value := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ dst.Parameters[key] = value
+
+ if len(src[rp:]) == 1 {
+ if src[rp] != 0 {
+ return fmt.Errorf("bad startup message last byte: expected 0, got %d", src[rp])
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *StartupMessage) Encode(dst []byte) ([]byte, error) {
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint32(dst, src.ProtocolVersion)
+ for k, v := range src.Parameters {
+ dst = append(dst, k...)
+ dst = append(dst, 0)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ return finishMessage(dst, sp)
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src StartupMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "StartupMessage",
+ ProtocolVersion: src.ProtocolVersion,
+ Parameters: src.Parameters,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go b/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go
new file mode 100644
index 0000000..ea4fc95
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/sync.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Sync struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Sync) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Sync) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Sync", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Sync) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'S', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Sync) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Sync",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go b/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go
new file mode 100644
index 0000000..35a9dc8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/terminate.go
@@ -0,0 +1,34 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Terminate struct{}
+
+// Frontend identifies this message as sendable by a PostgreSQL frontend.
+func (*Terminate) Frontend() {}
+
+// Decode decodes src into dst. src must contain the complete message with the exception of the initial 1 byte message
+// type identifier and 4 byte message length.
+func (dst *Terminate) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Terminate", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+// Encode encodes src into dst. dst will include the 1 byte message type identifier and the 4 byte message length.
+func (src *Terminate) Encode(dst []byte) ([]byte, error) {
+ return append(dst, 'X', 0, 0, 0, 4), nil
+}
+
+// MarshalJSON implements encoding/json.Marshaler.
+func (src Terminate) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Terminate",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go b/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go
new file mode 100644
index 0000000..6cc7d3e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgproto3/trace.go
@@ -0,0 +1,416 @@
+package pgproto3
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// tracer traces the messages sent to and from a Backend or Frontend. The format it produces roughly mimics the
+// format produced by the libpq C function PQtrace.
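+//
+// Each traced message becomes one tab-separated line. A Query sent by the
+// frontend might render roughly as (timestamp and length illustrative):
+//
+//   2024-09-05 19:38:25.000000  F  Query  14   "SELECT 1"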
+type tracer struct {
+ TracerOptions
+
+ mux sync.Mutex
+ w io.Writer
+ buf *bytes.Buffer
+}
+
+// TracerOptions controls tracing behavior. It is roughly equivalent to the libpq function PQsetTraceFlags.
+type TracerOptions struct {
+ // SuppressTimestamps prevents printing of timestamps.
+ SuppressTimestamps bool
+
+ // RegressMode redacts fields that may vary between executions.
+ RegressMode bool
+}
+
+func (t *tracer) traceMessage(sender byte, encodedLen int32, msg Message) {
+ switch msg := msg.(type) {
+ case *AuthenticationCleartextPassword:
+ t.traceAuthenticationCleartextPassword(sender, encodedLen, msg)
+ case *AuthenticationGSS:
+ t.traceAuthenticationGSS(sender, encodedLen, msg)
+ case *AuthenticationGSSContinue:
+ t.traceAuthenticationGSSContinue(sender, encodedLen, msg)
+ case *AuthenticationMD5Password:
+ t.traceAuthenticationMD5Password(sender, encodedLen, msg)
+ case *AuthenticationOk:
+ t.traceAuthenticationOk(sender, encodedLen, msg)
+ case *AuthenticationSASL:
+ t.traceAuthenticationSASL(sender, encodedLen, msg)
+ case *AuthenticationSASLContinue:
+ t.traceAuthenticationSASLContinue(sender, encodedLen, msg)
+ case *AuthenticationSASLFinal:
+ t.traceAuthenticationSASLFinal(sender, encodedLen, msg)
+ case *BackendKeyData:
+ t.traceBackendKeyData(sender, encodedLen, msg)
+ case *Bind:
+ t.traceBind(sender, encodedLen, msg)
+ case *BindComplete:
+ t.traceBindComplete(sender, encodedLen, msg)
+ case *CancelRequest:
+ t.traceCancelRequest(sender, encodedLen, msg)
+ case *Close:
+ t.traceClose(sender, encodedLen, msg)
+ case *CloseComplete:
+ t.traceCloseComplete(sender, encodedLen, msg)
+ case *CommandComplete:
+ t.traceCommandComplete(sender, encodedLen, msg)
+ case *CopyBothResponse:
+ t.traceCopyBothResponse(sender, encodedLen, msg)
+ case *CopyData:
+ t.traceCopyData(sender, encodedLen, msg)
+ case *CopyDone:
+ t.traceCopyDone(sender, encodedLen, msg)
+ case *CopyFail:
+ t.traceCopyFail(sender, encodedLen, msg)
+ case *CopyInResponse:
+ t.traceCopyInResponse(sender, encodedLen, msg)
+ case *CopyOutResponse:
+ t.traceCopyOutResponse(sender, encodedLen, msg)
+ case *DataRow:
+ t.traceDataRow(sender, encodedLen, msg)
+ case *Describe:
+ t.traceDescribe(sender, encodedLen, msg)
+ case *EmptyQueryResponse:
+ t.traceEmptyQueryResponse(sender, encodedLen, msg)
+ case *ErrorResponse:
+ t.traceErrorResponse(sender, encodedLen, msg)
+ case *Execute:
+ t.traceExecute(sender, encodedLen, msg)
+ case *Flush:
+ t.traceFlush(sender, encodedLen, msg)
+ case *FunctionCall:
+ t.traceFunctionCall(sender, encodedLen, msg)
+ case *FunctionCallResponse:
+ t.traceFunctionCallResponse(sender, encodedLen, msg)
+ case *GSSEncRequest:
+ t.traceGSSEncRequest(sender, encodedLen, msg)
+ case *NoData:
+ t.traceNoData(sender, encodedLen, msg)
+ case *NoticeResponse:
+ t.traceNoticeResponse(sender, encodedLen, msg)
+ case *NotificationResponse:
+ t.traceNotificationResponse(sender, encodedLen, msg)
+ case *ParameterDescription:
+ t.traceParameterDescription(sender, encodedLen, msg)
+ case *ParameterStatus:
+ t.traceParameterStatus(sender, encodedLen, msg)
+ case *Parse:
+ t.traceParse(sender, encodedLen, msg)
+ case *ParseComplete:
+ t.traceParseComplete(sender, encodedLen, msg)
+ case *PortalSuspended:
+ t.tracePortalSuspended(sender, encodedLen, msg)
+ case *Query:
+ t.traceQuery(sender, encodedLen, msg)
+ case *ReadyForQuery:
+ t.traceReadyForQuery(sender, encodedLen, msg)
+ case *RowDescription:
+ t.traceRowDescription(sender, encodedLen, msg)
+ case *SSLRequest:
+ t.traceSSLRequest(sender, encodedLen, msg)
+ case *StartupMessage:
+ t.traceStartupMessage(sender, encodedLen, msg)
+ case *Sync:
+ t.traceSync(sender, encodedLen, msg)
+ case *Terminate:
+ t.traceTerminate(sender, encodedLen, msg)
+ default:
+ t.writeTrace(sender, encodedLen, "Unknown", nil)
+ }
+}
+
+func (t *tracer) traceAuthenticationCleartextPassword(sender byte, encodedLen int32, msg *AuthenticationCleartextPassword) {
+ t.writeTrace(sender, encodedLen, "AuthenticationCleartextPassword", nil)
+}
+
+func (t *tracer) traceAuthenticationGSS(sender byte, encodedLen int32, msg *AuthenticationGSS) {
+ t.writeTrace(sender, encodedLen, "AuthenticationGSS", nil)
+}
+
+func (t *tracer) traceAuthenticationGSSContinue(sender byte, encodedLen int32, msg *AuthenticationGSSContinue) {
+ t.writeTrace(sender, encodedLen, "AuthenticationGSSContinue", nil)
+}
+
+func (t *tracer) traceAuthenticationMD5Password(sender byte, encodedLen int32, msg *AuthenticationMD5Password) {
+ t.writeTrace(sender, encodedLen, "AuthenticationMD5Password", nil)
+}
+
+func (t *tracer) traceAuthenticationOk(sender byte, encodedLen int32, msg *AuthenticationOk) {
+ t.writeTrace(sender, encodedLen, "AuthenticationOk", nil)
+}
+
+func (t *tracer) traceAuthenticationSASL(sender byte, encodedLen int32, msg *AuthenticationSASL) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASL", nil)
+}
+
+func (t *tracer) traceAuthenticationSASLContinue(sender byte, encodedLen int32, msg *AuthenticationSASLContinue) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASLContinue", nil)
+}
+
+func (t *tracer) traceAuthenticationSASLFinal(sender byte, encodedLen int32, msg *AuthenticationSASLFinal) {
+ t.writeTrace(sender, encodedLen, "AuthenticationSASLFinal", nil)
+}
+
+func (t *tracer) traceBackendKeyData(sender byte, encodedLen int32, msg *BackendKeyData) {
+ t.writeTrace(sender, encodedLen, "BackendKeyData", func() {
+ if t.RegressMode {
+ t.buf.WriteString("\t NNNN NNNN")
+ } else {
+ fmt.Fprintf(t.buf, "\t %d %d", msg.ProcessID, msg.SecretKey)
+ }
+ })
+}
+
+func (t *tracer) traceBind(sender byte, encodedLen int32, msg *Bind) {
+ t.writeTrace(sender, encodedLen, "Bind", func() {
+ fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.DestinationPortal)), traceDoubleQuotedString([]byte(msg.PreparedStatement)), len(msg.ParameterFormatCodes))
+ for _, fc := range msg.ParameterFormatCodes {
+ fmt.Fprintf(t.buf, " %d", fc)
+ }
+ fmt.Fprintf(t.buf, " %d", len(msg.Parameters))
+ for _, p := range msg.Parameters {
+ fmt.Fprintf(t.buf, " %s", traceSingleQuotedString(p))
+ }
+ fmt.Fprintf(t.buf, " %d", len(msg.ResultFormatCodes))
+ for _, fc := range msg.ResultFormatCodes {
+ fmt.Fprintf(t.buf, " %d", fc)
+ }
+ })
+}
+
+func (t *tracer) traceBindComplete(sender byte, encodedLen int32, msg *BindComplete) {
+ t.writeTrace(sender, encodedLen, "BindComplete", nil)
+}
+
+func (t *tracer) traceCancelRequest(sender byte, encodedLen int32, msg *CancelRequest) {
+ t.writeTrace(sender, encodedLen, "CancelRequest", nil)
+}
+
+func (t *tracer) traceClose(sender byte, encodedLen int32, msg *Close) {
+ t.writeTrace(sender, encodedLen, "Close", nil)
+}
+
+func (t *tracer) traceCloseComplete(sender byte, encodedLen int32, msg *CloseComplete) {
+ t.writeTrace(sender, encodedLen, "CloseComplete", nil)
+}
+
+func (t *tracer) traceCommandComplete(sender byte, encodedLen int32, msg *CommandComplete) {
+ t.writeTrace(sender, encodedLen, "CommandComplete", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString(msg.CommandTag))
+ })
+}
+
+func (t *tracer) traceCopyBothResponse(sender byte, encodedLen int32, msg *CopyBothResponse) {
+ t.writeTrace(sender, encodedLen, "CopyBothResponse", nil)
+}
+
+func (t *tracer) traceCopyData(sender byte, encodedLen int32, msg *CopyData) {
+ t.writeTrace(sender, encodedLen, "CopyData", nil)
+}
+
+func (t *tracer) traceCopyDone(sender byte, encodedLen int32, msg *CopyDone) {
+ t.writeTrace(sender, encodedLen, "CopyDone", nil)
+}
+
+func (t *tracer) traceCopyFail(sender byte, encodedLen int32, msg *CopyFail) {
+ t.writeTrace(sender, encodedLen, "CopyFail", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString([]byte(msg.Message)))
+ })
+}
+
+func (t *tracer) traceCopyInResponse(sender byte, encodedLen int32, msg *CopyInResponse) {
+ t.writeTrace(sender, encodedLen, "CopyInResponse", nil)
+}
+
+func (t *tracer) traceCopyOutResponse(sender byte, encodedLen int32, msg *CopyOutResponse) {
+ t.writeTrace(sender, encodedLen, "CopyOutResponse", nil)
+}
+
+func (t *tracer) traceDataRow(sender byte, encodedLen int32, msg *DataRow) {
+ t.writeTrace(sender, encodedLen, "DataRow", func() {
+ fmt.Fprintf(t.buf, "\t %d", len(msg.Values))
+ for _, v := range msg.Values {
+ if v == nil {
+ t.buf.WriteString(" -1")
+ } else {
+ fmt.Fprintf(t.buf, " %d %s", len(v), traceSingleQuotedString(v))
+ }
+ }
+ })
+}
+
+func (t *tracer) traceDescribe(sender byte, encodedLen int32, msg *Describe) {
+ t.writeTrace(sender, encodedLen, "Describe", func() {
+ fmt.Fprintf(t.buf, "\t %c %s", msg.ObjectType, traceDoubleQuotedString([]byte(msg.Name)))
+ })
+}
+
+func (t *tracer) traceEmptyQueryResponse(sender byte, encodedLen int32, msg *EmptyQueryResponse) {
+ t.writeTrace(sender, encodedLen, "EmptyQueryResponse", nil)
+}
+
+func (t *tracer) traceErrorResponse(sender byte, encodedLen int32, msg *ErrorResponse) {
+ t.writeTrace(sender, encodedLen, "ErrorResponse", nil)
+}
+
+func (t *tracer) traceExecute(sender byte, encodedLen int32, msg *Execute) {
+ t.writeTrace(sender, encodedLen, "Execute", func() {
+ fmt.Fprintf(t.buf, "\t %s %d", traceDoubleQuotedString([]byte(msg.Portal)), msg.MaxRows)
+ })
+}
+
+func (t *tracer) traceFlush(sender byte, encodedLen int32, msg *Flush) {
+ t.writeTrace(sender, encodedLen, "Flush", nil)
+}
+
+func (t *tracer) traceFunctionCall(sender byte, encodedLen int32, msg *FunctionCall) {
+ t.writeTrace(sender, encodedLen, "FunctionCall", nil)
+}
+
+func (t *tracer) traceFunctionCallResponse(sender byte, encodedLen int32, msg *FunctionCallResponse) {
+ t.writeTrace(sender, encodedLen, "FunctionCallResponse", nil)
+}
+
+func (t *tracer) traceGSSEncRequest(sender byte, encodedLen int32, msg *GSSEncRequest) {
+ t.writeTrace(sender, encodedLen, "GSSEncRequest", nil)
+}
+
+func (t *tracer) traceNoData(sender byte, encodedLen int32, msg *NoData) {
+ t.writeTrace(sender, encodedLen, "NoData", nil)
+}
+
+func (t *tracer) traceNoticeResponse(sender byte, encodedLen int32, msg *NoticeResponse) {
+ t.writeTrace(sender, encodedLen, "NoticeResponse", nil)
+}
+
+func (t *tracer) traceNotificationResponse(sender byte, encodedLen int32, msg *NotificationResponse) {
+ t.writeTrace(sender, encodedLen, "NotificationResponse", func() {
+ fmt.Fprintf(t.buf, "\t %d %s %s", msg.PID, traceDoubleQuotedString([]byte(msg.Channel)), traceDoubleQuotedString([]byte(msg.Payload)))
+ })
+}
+
+func (t *tracer) traceParameterDescription(sender byte, encodedLen int32, msg *ParameterDescription) {
+ t.writeTrace(sender, encodedLen, "ParameterDescription", nil)
+}
+
+func (t *tracer) traceParameterStatus(sender byte, encodedLen int32, msg *ParameterStatus) {
+ t.writeTrace(sender, encodedLen, "ParameterStatus", func() {
+ fmt.Fprintf(t.buf, "\t %s %s", traceDoubleQuotedString([]byte(msg.Name)), traceDoubleQuotedString([]byte(msg.Value)))
+ })
+}
+
+func (t *tracer) traceParse(sender byte, encodedLen int32, msg *Parse) {
+ t.writeTrace(sender, encodedLen, "Parse", func() {
+ fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.Name)), traceDoubleQuotedString([]byte(msg.Query)), len(msg.ParameterOIDs))
+ for _, oid := range msg.ParameterOIDs {
+ fmt.Fprintf(t.buf, " %d", oid)
+ }
+ })
+}
+
+func (t *tracer) traceParseComplete(sender byte, encodedLen int32, msg *ParseComplete) {
+ t.writeTrace(sender, encodedLen, "ParseComplete", nil)
+}
+
+func (t *tracer) tracePortalSuspended(sender byte, encodedLen int32, msg *PortalSuspended) {
+ t.writeTrace(sender, encodedLen, "PortalSuspended", nil)
+}
+
+func (t *tracer) traceQuery(sender byte, encodedLen int32, msg *Query) {
+ t.writeTrace(sender, encodedLen, "Query", func() {
+ fmt.Fprintf(t.buf, "\t %s", traceDoubleQuotedString([]byte(msg.String)))
+ })
+}
+
+func (t *tracer) traceReadyForQuery(sender byte, encodedLen int32, msg *ReadyForQuery) {
+ t.writeTrace(sender, encodedLen, "ReadyForQuery", func() {
+ fmt.Fprintf(t.buf, "\t %c", msg.TxStatus)
+ })
+}
+
+func (t *tracer) traceRowDescription(sender byte, encodedLen int32, msg *RowDescription) {
+ t.writeTrace(sender, encodedLen, "RowDescription", func() {
+ fmt.Fprintf(t.buf, "\t %d", len(msg.Fields))
+ for _, fd := range msg.Fields {
+ fmt.Fprintf(t.buf, ` %s %d %d %d %d %d %d`, traceDoubleQuotedString(fd.Name), fd.TableOID, fd.TableAttributeNumber, fd.DataTypeOID, fd.DataTypeSize, fd.TypeModifier, fd.Format)
+ }
+ })
+}
+
+func (t *tracer) traceSSLRequest(sender byte, encodedLen int32, msg *SSLRequest) {
+ t.writeTrace(sender, encodedLen, "SSLRequest", nil)
+}
+
+func (t *tracer) traceStartupMessage(sender byte, encodedLen int32, msg *StartupMessage) {
+ t.writeTrace(sender, encodedLen, "StartupMessage", nil)
+}
+
+func (t *tracer) traceSync(sender byte, encodedLen int32, msg *Sync) {
+ t.writeTrace(sender, encodedLen, "Sync", nil)
+}
+
+func (t *tracer) traceTerminate(sender byte, encodedLen int32, msg *Terminate) {
+ t.writeTrace(sender, encodedLen, "Terminate", nil)
+}
+
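+// writeTrace writes a single trace line: an optional timestamp, the sender
+// byte (conventionally 'F' for frontend and 'B' for backend), the message type
+// name, the encoded length, and any details appended by writeDetails.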
+func (t *tracer) writeTrace(sender byte, encodedLen int32, msgType string, writeDetails func()) {
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ defer func() {
+ if t.buf.Cap() > 1024 {
+ t.buf = &bytes.Buffer{}
+ } else {
+ t.buf.Reset()
+ }
+ }()
+
+ if !t.SuppressTimestamps {
+ now := time.Now()
+ t.buf.WriteString(now.Format("2006-01-02 15:04:05.000000"))
+ t.buf.WriteByte('\t')
+ }
+
+ t.buf.WriteByte(sender)
+ t.buf.WriteByte('\t')
+ t.buf.WriteString(msgType)
+ t.buf.WriteByte('\t')
+ t.buf.WriteString(strconv.FormatInt(int64(encodedLen), 10))
+
+ if writeDetails != nil {
+ writeDetails()
+ }
+
+ t.buf.WriteByte('\n')
+ t.buf.WriteTo(t.w)
+}
+
+// traceDoubleQuotedString returns buf as a double-quoted string without any escaping. It is roughly equivalent to
+// pqTraceOutputString in libpq.
+func traceDoubleQuotedString(buf []byte) string {
+ return `"` + string(buf) + `"`
+}
+
+// traceSingleQuotedString returns buf as a single-quoted string with non-printable characters hex-escaped. It is
+// roughly equivalent to pqTraceOutputNchar in libpq.
+func traceSingleQuotedString(buf []byte) string {
+ sb := &strings.Builder{}
+
+ sb.WriteByte('\'')
+ for _, b := range buf {
+ if b < 32 || b > 126 {
+ fmt.Fprintf(sb, `\x%02x`, b)
+ } else {
+ sb.WriteByte(b)
+ }
+ }
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/array.go b/vendor/github.com/jackc/pgx/v5/pgtype/array.go
new file mode 100644
index 0000000..06b824a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/array.go
@@ -0,0 +1,460 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// Information on the internals of PostgreSQL arrays can be found in
+// src/include/utils/array.h and src/backend/utils/adt/arrayfuncs.c. Of
+// particular interest is the array_send function.
+
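+// arrayHeader is the fixed-size prefix of a binary-format array: the number of
+// dimensions, a contains-null flag, and the element OID, followed by an
+// (int32 length, int32 lower bound) pair per dimension.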
+type arrayHeader struct {
+ ContainsNull bool
+ ElementOID uint32
+ Dimensions []ArrayDimension
+}
+
+type ArrayDimension struct {
+ Length int32
+ LowerBound int32
+}
+
+// cardinality returns the number of elements in an array of dimensions size.
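+// For example, dimensions with lengths [3,5,2] give a cardinality of 30.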
+func cardinality(dimensions []ArrayDimension) int {
+ if len(dimensions) == 0 {
+ return 0
+ }
+
+ elementCount := int(dimensions[0].Length)
+ for _, d := range dimensions[1:] {
+ elementCount *= int(d.Length)
+ }
+
+ return elementCount
+}
+
+func (dst *arrayHeader) DecodeBinary(m *Map, src []byte) (int, error) {
+ if len(src) < 12 {
+ return 0, fmt.Errorf("array header too short: %d", len(src))
+ }
+
+ rp := 0
+
+ numDims := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.ContainsNull = binary.BigEndian.Uint32(src[rp:]) == 1
+ rp += 4
+
+ dst.ElementOID = binary.BigEndian.Uint32(src[rp:])
+ rp += 4
+
+ dst.Dimensions = make([]ArrayDimension, numDims)
+ if len(src) < 12+numDims*8 {
+ return 0, fmt.Errorf("array header too short for %d dimensions: %d", numDims, len(src))
+ }
+ for i := range dst.Dimensions {
+ dst.Dimensions[i].Length = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.Dimensions[i].LowerBound = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ }
+
+ return rp, nil
+}
+
+func (src arrayHeader) EncodeBinary(buf []byte) []byte {
+ buf = pgio.AppendInt32(buf, int32(len(src.Dimensions)))
+
+ var containsNull int32
+ if src.ContainsNull {
+ containsNull = 1
+ }
+ buf = pgio.AppendInt32(buf, containsNull)
+
+ buf = pgio.AppendUint32(buf, src.ElementOID)
+
+ for i := range src.Dimensions {
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].Length)
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].LowerBound)
+ }
+
+ return buf
+}
+
+type untypedTextArray struct {
+ Elements []string
+ Quoted []bool
+ Dimensions []ArrayDimension
+}
+
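+// parseUntypedTextArray parses a PostgreSQL text-format array literal, such as
+// {1,2,3} or [0:2]={a,b,c} with explicit bounds, into raw string elements and
+// dimensions without interpreting the element type.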
+func parseUntypedTextArray(src string) (*untypedTextArray, error) {
+ dst := &untypedTextArray{
+ Elements: []string{},
+ Quoted: []bool{},
+ Dimensions: []ArrayDimension{},
+ }
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ var explicitDimensions []ArrayDimension
+
+ // Array has explicit dimensions
+ if r == '[' {
+ buf.UnreadRune()
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r == '=' {
+ break
+ } else if r != '[' {
+ return nil, fmt.Errorf("invalid array, expected '[' or '=', got %v", r)
+ }
+
+ lower, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != ':' {
+ return nil, fmt.Errorf("invalid array, expected ':', got %v", r)
+ }
+
+ upper, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != ']' {
+ return nil, fmt.Errorf("invalid array, expected ']', got %v", r)
+ }
+
+ explicitDimensions = append(explicitDimensions, ArrayDimension{LowerBound: lower, Length: upper - lower + 1})
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+ }
+
+ if r != '{' {
+ return nil, fmt.Errorf("invalid array, expected '{', got %v", r)
+ }
+
+ implicitDimensions := []ArrayDimension{{LowerBound: 1, Length: 0}}
+
+ // Consume all initial opening brackets. This provides number of dimensions.
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r == '{' {
+ implicitDimensions[len(implicitDimensions)-1].Length = 1
+ implicitDimensions = append(implicitDimensions, ArrayDimension{LowerBound: 1})
+ } else {
+ buf.UnreadRune()
+ break
+ }
+ }
+ currentDim := len(implicitDimensions) - 1
+ counterDim := currentDim
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ switch r {
+ case '{':
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ currentDim++
+ case ',':
+ case '}':
+ currentDim--
+ if currentDim < counterDim {
+ counterDim = currentDim
+ }
+ default:
+ buf.UnreadRune()
+ value, quoted, err := arrayParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid array value: %w", err)
+ }
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ dst.Quoted = append(dst.Quoted, quoted)
+ dst.Elements = append(dst.Elements, value)
+ }
+
+ if currentDim < 0 {
+ break
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ if len(dst.Elements) > 0 {
+ if len(explicitDimensions) > 0 {
+ dst.Dimensions = explicitDimensions
+ } else {
+ dst.Dimensions = implicitDimensions
+ }
+ }
+
+ return dst, nil
+}
+
+func skipWhitespace(buf *bytes.Buffer) {
+ var r rune
+ var err error
+ for r, _, _ = buf.ReadRune(); unicode.IsSpace(r); r, _, _ = buf.ReadRune() {
+ }
+
+ if err != io.EOF {
+ buf.UnreadRune()
+ }
+}
+
+func arrayParseValue(buf *bytes.Buffer) (string, bool, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ if r == '"' {
+ return arrayParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+
+ switch r {
+ case ',', '}':
+ buf.UnreadRune()
+ return s.String(), false, nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseQuotedValue(buf *bytes.Buffer) (string, bool, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", false, err
+ }
+ buf.UnreadRune()
+ return s.String(), true, nil
+ }
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseInteger(buf *bytes.Buffer) (int32, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return 0, err
+ }
+
+ if ('0' <= r && r <= '9') || r == '-' {
+ s.WriteRune(r)
+ } else {
+ buf.UnreadRune()
+ n, err := strconv.ParseInt(s.String(), 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(n), nil
+ }
+ }
+}
+
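+// encodeTextArrayDimensions appends explicit dimension bounds such as [0:2]=
+// to buf, but only when some dimension has a lower bound other than the
+// default of 1; otherwise buf is returned unchanged.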
+func encodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
+ var customDimensions bool
+ for _, dim := range dimensions {
+ if dim.LowerBound != 1 {
+ customDimensions = true
+ }
+ }
+
+ if !customDimensions {
+ return buf
+ }
+
+ for _, dim := range dimensions {
+ buf = append(buf, '[')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound), 10)...)
+ buf = append(buf, ':')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound+dim.Length-1), 10)...)
+ buf = append(buf, ']')
+ }
+
+ return append(buf, '=')
+}
+
+var quoteArrayReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
+
+func quoteArrayElement(src string) string {
+ return `"` + quoteArrayReplacer.Replace(src) + `"`
+}
+
+func isSpace(ch byte) bool {
+ // see array_isspace:
+ // https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\v' || ch == '\f'
+}
+
+func quoteArrayElementIfNeeded(src string) string {
+ if src == "" || (len(src) == 4 && strings.EqualFold(src, "null")) || isSpace(src[0]) || isSpace(src[len(src)-1]) || strings.ContainsAny(src, `{},"\`) {
+ return quoteArrayElement(src)
+ }
+ return src
+}
+
+// Array represents a PostgreSQL array for T. It implements the ArrayGetter and ArraySetter interfaces. It preserves
+// PostgreSQL dimensions and custom lower bounds. Use FlatArray if these are not needed.
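+//
+// A sketch (values illustrative): a 2x2 int4 array could be represented as
+//
+//   Array[int32]{
+//   Elements: []int32{1, 2, 3, 4},
+//   Dims: []ArrayDimension{{Length: 2, LowerBound: 1}, {Length: 2, LowerBound: 1}},
+//   Valid: true,
+//   }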
+type Array[T any] struct {
+ Elements []T
+ Dims []ArrayDimension
+ Valid bool
+}
+
+func (a Array[T]) Dimensions() []ArrayDimension {
+ return a.Dims
+}
+
+func (a Array[T]) Index(i int) any {
+ return a.Elements[i]
+}
+
+func (a Array[T]) IndexType() any {
+ var el T
+ return el
+}
+
+func (a *Array[T]) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ *a = Array[T]{}
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ *a = Array[T]{
+ Elements: make([]T, elementCount),
+ Dims: dimensions,
+ Valid: true,
+ }
+
+ return nil
+}
+
+func (a Array[T]) ScanIndex(i int) any {
+ return &a.Elements[i]
+}
+
+func (a Array[T]) ScanIndexType() any {
+ return new(T)
+}
+
+// FlatArray implements the ArrayGetter and ArraySetter interfaces for any slice of T. It ignores PostgreSQL dimensions
+// and custom lower bounds. Use Array to preserve these.
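+//
+// For example, FlatArray[int32]{1, 2, 3} maps to a one-dimensional array of
+// length 3 with the default lower bound of 1 (a sketch; see Dimensions below).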
+type FlatArray[T any] []T
+
+func (a FlatArray[T]) Dimensions() []ArrayDimension {
+ if a == nil {
+ return nil
+ }
+
+ return []ArrayDimension{{Length: int32(len(a)), LowerBound: 1}}
+}
+
+func (a FlatArray[T]) Index(i int) any {
+ return a[i]
+}
+
+func (a FlatArray[T]) IndexType() any {
+ var el T
+ return el
+}
+
+func (a *FlatArray[T]) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ *a = nil
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ *a = make(FlatArray[T], elementCount)
+ return nil
+}
+
+func (a FlatArray[T]) ScanIndex(i int) any {
+ return &a[i]
+}
+
+func (a FlatArray[T]) ScanIndexType() any {
+ return new(T)
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go
new file mode 100644
index 0000000..bf5f698
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/array_codec.go
@@ -0,0 +1,405 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// ArrayGetter is a type that can be converted into a PostgreSQL array.
+type ArrayGetter interface {
+ // Dimensions returns the array dimensions. If array is nil then nil is returned.
+ Dimensions() []ArrayDimension
+
+ // Index returns the element at i.
+ Index(i int) any
+
+ // IndexType returns a non-nil scan target of the type Index will return. This is used by ArrayCodec.PlanEncode.
+ IndexType() any
+}
+
+// ArraySetter is a type that can be set from a PostgreSQL array.
+type ArraySetter interface {
+ // SetDimensions prepares the value such that ScanIndex can be called for each element. This will remove any existing
+ // elements. dimensions may be nil to indicate a NULL array. If unable to exactly preserve dimensions SetDimensions
+ // may return an error or silently flatten the array dimensions.
+ SetDimensions(dimensions []ArrayDimension) error
+
+ // ScanIndex returns a value usable as a scan target for i. SetDimensions must be called before ScanIndex.
+ ScanIndex(i int) any
+
+ // ScanIndexType returns a non-nil scan target of the type ScanIndex will return. This is used by
+ // ArrayCodec.PlanScan.
+ ScanIndexType() any
+}
+
+// ArrayCodec is a codec for any array type.
+type ArrayCodec struct {
+ ElementType *Type
+}
+
+func (c *ArrayCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *ArrayCodec) PreferredFormat() int16 {
+ // The binary format should always be preferred for arrays if it is supported. Usually this happens automatically
+ // because most types that support binary prefer it. However, text, json, and jsonb support binary but prefer the text
+ // format: it is simpler for jsonb, and PostgreSQL can be significantly faster with the text format for text-like data
+ // types. Arrays, however, appear to always be faster in binary.
+ //
+ // https://www.postgresql.org/message-id/CAMovtNoHFod2jMAKQjjxv209PCTJx5Kc66anwWvX0mEiaXwgmA%40mail.gmail.com
+ if c.ElementType.Codec.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *ArrayCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ arrayValuer, ok := value.(ArrayGetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := arrayValuer.IndexType()
+
+ elementEncodePlan := m.PlanEncode(c.ElementType.OID, format, elementType)
+ if elementEncodePlan == nil {
+ if reflect.TypeOf(elementType) != nil {
+ return nil
+ }
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanArrayCodecBinary{ac: c, m: m, oid: oid}
+ case TextFormatCode:
+ return &encodePlanArrayCodecText{ac: c, m: m, oid: oid}
+ }
+
+ return nil
+}
+
+type encodePlanArrayCodecText struct {
+ ac *ArrayCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanArrayCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ array := value.(ArrayGetter)
+
+ dimensions := array.Dimensions()
+ if dimensions == nil {
+ return nil, nil
+ }
+
+ elementCount := cardinality(dimensions)
+ if elementCount == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = encodeTextArrayDimensions(buf, dimensions)
+
+ // dimElemCounts[d] is the number of elements spanned by one full iteration of
+ // dimension d. For example, a single-dimension array of length 4 has a
+ // dimElemCounts of [4], and a multi-dimensional array of lengths [3,5,2] has a
+ // dimElemCounts of [30,10,2]. Element i opens a '{' for dimension d when
+ // i%dimElemCounts[d] == 0 and closes a '}' when (i+1)%dimElemCounts[d] == 0.
+ dimElemCounts := make([]int, len(dimensions))
+ dimElemCounts[len(dimensions)-1] = int(dimensions[len(dimensions)-1].Length)
+ for i := len(dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ inElemBuf := make([]byte, 0, 32)
+ for i := 0; i < elementCount; i++ {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elem := array.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, TextFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", array.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, quoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+type encodePlanArrayCodecBinary struct {
+ ac *ArrayCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanArrayCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ array := value.(ArrayGetter)
+
+ dimensions := array.Dimensions()
+ if dimensions == nil {
+ return nil, nil
+ }
+
+ arrayHeader := arrayHeader{
+ Dimensions: dimensions,
+ ElementOID: p.ac.ElementType.OID,
+ }
+
+ containsNullIndex := len(buf) + 4
+
+ buf = arrayHeader.EncodeBinary(buf)
+
+ elementCount := cardinality(dimensions)
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ for i := 0; i < elementCount; i++ {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elem := array.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, BinaryFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", array.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ pgio.SetInt32(buf[containsNullIndex:], 1)
+ } else {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+func (c *ArrayCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ arrayScanner, ok := target.(ArraySetter)
+ if !ok {
+ return nil
+ }
+
+ // target / arrayScanner might be a pointer to nil. If it is, create one so we can call ScanIndexType to plan the
+ // scan of the elements.
+ if isNil, _ := isNilDriverValuer(target); isNil {
+ arrayScanner = reflect.New(reflect.TypeOf(target).Elem()).Interface().(ArraySetter)
+ }
+
+ elementType := arrayScanner.ScanIndexType()
+
+ elementScanPlan := m.PlanScan(c.ElementType.OID, format, elementType)
+ if _, ok := elementScanPlan.(*scanPlanFail); ok {
+ return nil
+ }
+
+ return &scanPlanArrayCodec{
+ arrayCodec: c,
+ m: m,
+ oid: oid,
+ formatCode: format,
+ }
+}
+
+func (c *ArrayCodec) decodeBinary(m *Map, arrayOID uint32, src []byte, array ArraySetter) error {
+ var arrayHeader arrayHeader
+ rp, err := arrayHeader.DecodeBinary(m, src)
+ if err != nil {
+ return err
+ }
+
+ err = array.SetDimensions(arrayHeader.Dimensions)
+ if err != nil {
+ return err
+ }
+
+ elementCount := cardinality(arrayHeader.Dimensions)
+ if elementCount == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, BinaryFormatCode, array.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, BinaryFormatCode, array.ScanIndex(0))
+ }
+
+ for i := 0; i < elementCount; i++ {
+ elem := array.ScanIndex(i)
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return fmt.Errorf("failed to scan array element %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (c *ArrayCodec) decodeText(m *Map, arrayOID uint32, src []byte, array ArraySetter) error {
+ uta, err := parseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ err = array.SetDimensions(uta.Dimensions)
+ if err != nil {
+ return err
+ }
+
+ if len(uta.Elements) == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, TextFormatCode, array.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, array.ScanIndex(0))
+ }
+
+ for i, s := range uta.Elements {
+ elem := array.ScanIndex(i)
+ var elemSrc []byte
+ if s != "NULL" || uta.Quoted[i] {
+ elemSrc = []byte(s)
+ }
+
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
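+
+// In the text format an unquoted NULL element denotes SQL NULL, while a quoted
+// "NULL" is the four-character string; that is why decodeText consults
+// uta.Quoted[i] before treating an element as NULL.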
+
+type scanPlanArrayCodec struct {
+ arrayCodec *ArrayCodec
+ m *Map
+ oid uint32
+ formatCode int16
+ elementScanPlan ScanPlan
+}
+
+func (spac *scanPlanArrayCodec) Scan(src []byte, dst any) error {
+ c := spac.arrayCodec
+ m := spac.m
+ oid := spac.oid
+ formatCode := spac.formatCode
+
+ array := dst.(ArraySetter)
+
+ if src == nil {
+ return array.SetDimensions(nil)
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ return c.decodeBinary(m, oid, src, array)
+ case TextFormatCode:
+ return c.decodeText(m, oid, src, array)
+ default:
+ return fmt.Errorf("unknown format code %d", formatCode)
+ }
+}
+
+func (c *ArrayCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *ArrayCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var slice []any
+ err := m.PlanScan(oid, format, &slice).Scan(src, &slice)
+ return slice, err
+}
+
+func isRagged(slice reflect.Value) bool {
+ if slice.Type().Elem().Kind() != reflect.Slice {
+ return false
+ }
+
+ sliceLen := slice.Len()
+ innerLen := 0
+ for i := 0; i < sliceLen; i++ {
+ if i == 0 {
+ innerLen = slice.Index(i).Len()
+ } else {
+ if slice.Index(i).Len() != innerLen {
+ return true
+ }
+ }
+ if isRagged(slice.Index(i)) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bits.go b/vendor/github.com/jackc/pgx/v5/pgtype/bits.go
new file mode 100644
index 0000000..e7a1d01
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bits.go
@@ -0,0 +1,210 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BitsScanner interface {
+ ScanBits(v Bits) error
+}
+
+type BitsValuer interface {
+ BitsValue() (Bits, error)
+}
+
+// Bits represents the PostgreSQL bit and varbit types.
+type Bits struct {
+ Bytes []byte
+ Len int32 // Number of bits
+ Valid bool
+}
+
+func (b *Bits) ScanBits(v Bits) error {
+ *b = v
+ return nil
+}
+
+func (b Bits) BitsValue() (Bits, error) {
+ return b, nil
+}
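+
+// A minimal usage sketch (assuming a *pgx.Conn named conn and a context.Context
+// named ctx):
+//
+//	var b Bits
+//	err := conn.QueryRow(ctx, "select '1011'::varbit").Scan(&b)
+//	// on success: b.Len == 4, b.Bytes[0] == 0b10110000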
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bits) Scan(src any) error {
+ if src == nil {
+ *dst = Bits{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToBitsScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Bits) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := BitsCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type BitsCodec struct{}
+
+func (BitsCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BitsCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BitsCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(BitsValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanBitsCodecBinary{}
+ case TextFormatCode:
+ return encodePlanBitsCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanBitsCodecBinary struct{}
+
+func (encodePlanBitsCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ bits, err := value.(BitsValuer).BitsValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !bits.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, bits.Len)
+ return append(buf, bits.Bytes...), nil
+}
+
+type encodePlanBitsCodecText struct{}
+
+func (encodePlanBitsCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ bits, err := value.(BitsValuer).BitsValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !bits.Valid {
+ return nil, nil
+ }
+
+ for i := int32(0); i < bits.Len; i++ {
+ byteIdx := i / 8
+ bitMask := byte(128 >> byte(i%8))
+ char := byte('0')
+ if bits.Bytes[byteIdx]&bitMask > 0 {
+ char = '1'
+ }
+ buf = append(buf, char)
+ }
+
+ return buf, nil
+}
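+
+// For example, Bits{Bytes: []byte{0b10100000}, Len: 3, Valid: true} encodes to
+// "101": bits are read most-significant-first and padding bits beyond Len are
+// ignored.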
+
+func (BitsCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case BitsScanner:
+ return scanPlanBinaryBitsToBitsScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case BitsScanner:
+ return scanPlanTextAnyToBitsScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c BitsCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c BitsCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var box Bits
+ err := codecScan(c, m, oid, format, src, &box)
+ if err != nil {
+ return nil, err
+ }
+ return box, nil
+}
+
+type scanPlanBinaryBitsToBitsScanner struct{}
+
+func (scanPlanBinaryBitsToBitsScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BitsScanner)
+
+ if src == nil {
+ return scanner.ScanBits(Bits{})
+ }
+
+ if len(src) < 4 {
+ return fmt.Errorf("invalid length for bit/varbit: %v", len(src))
+ }
+
+ bitLen := int32(binary.BigEndian.Uint32(src))
+ rp := 4
+ buf := make([]byte, len(src[rp:]))
+ copy(buf, src[rp:])
+
+ return scanner.ScanBits(Bits{Bytes: buf, Len: bitLen, Valid: true})
+}
+
+type scanPlanTextAnyToBitsScanner struct{}
+
+func (scanPlanTextAnyToBitsScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BitsScanner)
+
+ if src == nil {
+ return scanner.ScanBits(Bits{})
+ }
+
+ bitLen := len(src)
+ byteLen := bitLen / 8
+ if bitLen%8 > 0 {
+ byteLen++
+ }
+ buf := make([]byte, byteLen)
+
+ for i, b := range src {
+ if b == '1' {
+ byteIdx := i / 8
+ bitIdx := uint(i % 8)
+ buf[byteIdx] = buf[byteIdx] | (128 >> bitIdx)
+ }
+ }
+
+ return scanner.ScanBits(Bits{Bytes: buf, Len: int32(bitLen), Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bool.go b/vendor/github.com/jackc/pgx/v5/pgtype/bool.go
new file mode 100644
index 0000000..71caffa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bool.go
@@ -0,0 +1,343 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type BoolScanner interface {
+ ScanBool(v Bool) error
+}
+
+type BoolValuer interface {
+ BoolValue() (Bool, error)
+}
+
+type Bool struct {
+ Bool bool
+ Valid bool
+}
+
+func (b *Bool) ScanBool(v Bool) error {
+ *b = v
+ return nil
+}
+
+func (b Bool) BoolValue() (Bool, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bool) Scan(src any) error {
+ if src == nil {
+ *dst = Bool{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case bool:
+ *dst = Bool{Bool: src, Valid: true}
+ return nil
+ case string:
+ b, err := strconv.ParseBool(src)
+ if err != nil {
+ return err
+ }
+ *dst = Bool{Bool: b, Valid: true}
+ return nil
+ case []byte:
+ b, err := strconv.ParseBool(string(src))
+ if err != nil {
+ return err
+ }
+ *dst = Bool{Bool: b, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Bool) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ return src.Bool, nil
+}
+
+func (src Bool) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ if src.Bool {
+ return []byte("true"), nil
+ } else {
+ return []byte("false"), nil
+ }
+}
+
+func (dst *Bool) UnmarshalJSON(b []byte) error {
+ var v *bool
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ if v == nil {
+ *dst = Bool{}
+ } else {
+ *dst = Bool{Bool: *v, Valid: true}
+ }
+
+ return nil
+}
+
+type BoolCodec struct{}
+
+func (BoolCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BoolCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BoolCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case bool:
+ return encodePlanBoolCodecBinaryBool{}
+ case BoolValuer:
+ return encodePlanBoolCodecBinaryBoolValuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case bool:
+ return encodePlanBoolCodecTextBool{}
+ case BoolValuer:
+ return encodePlanBoolCodecTextBoolValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanBoolCodecBinaryBool struct{}
+
+func (encodePlanBoolCodecBinaryBool) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(bool)
+
+ if v {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecTextBoolValuer struct{}
+
+func (encodePlanBoolCodecTextBoolValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BoolValuer).BoolValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !b.Valid {
+ return nil, nil
+ }
+
+ if b.Bool {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecBinaryBoolValuer struct{}
+
+func (encodePlanBoolCodecBinaryBoolValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BoolValuer).BoolValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !b.Valid {
+ return nil, nil
+ }
+
+ if b.Bool {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+type encodePlanBoolCodecTextBool struct{}
+
+func (encodePlanBoolCodecTextBool) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(bool)
+
+ if v {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+func (BoolCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *bool:
+ return scanPlanBinaryBoolToBool{}
+ case BoolScanner:
+ return scanPlanBinaryBoolToBoolScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *bool:
+ return scanPlanTextAnyToBool{}
+ case BoolScanner:
+ return scanPlanTextAnyToBoolScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c BoolCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c BoolCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var b bool
+ err := codecScan(c, m, oid, format, src, &b)
+ if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+type scanPlanBinaryBoolToBool struct{}
+
+func (scanPlanBinaryBoolToBool) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 1 {
+ return fmt.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ p, ok := (dst).(*bool)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = src[0] == 1
+
+ return nil
+}
+
+type scanPlanTextAnyToBool struct{}
+
+func (scanPlanTextAnyToBool) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("cannot scan empty string into %T", dst)
+ }
+
+ p, ok := (dst).(*bool)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ v, err := planTextToBool(src)
+ if err != nil {
+ return err
+ }
+
+ *p = v
+
+ return nil
+}
+
+type scanPlanBinaryBoolToBoolScanner struct{}
+
+func (scanPlanBinaryBoolToBoolScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(BoolScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanBool(Bool{})
+ }
+
+ if len(src) != 1 {
+ return fmt.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ return s.ScanBool(Bool{Bool: src[0] == 1, Valid: true})
+}
+
+type scanPlanTextAnyToBoolScanner struct{}
+
+func (scanPlanTextAnyToBoolScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(BoolScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanBool(Bool{})
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("cannot scan empty string into %T", dst)
+ }
+
+ v, err := planTextToBool(src)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanBool(Bool{Bool: v, Valid: true})
+}
+
+// https://www.postgresql.org/docs/11/datatype-boolean.html
+func planTextToBool(src []byte) (bool, error) {
+ s := string(bytes.ToLower(bytes.TrimSpace(src)))
+
+ switch {
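+	// Note the reversed arguments: strings.HasPrefix("true", s) is true when s is
+	// a prefix of "true", so abbreviated PostgreSQL literals such as "t", "tr",
+	// "y", and "n" are accepted. Callers must reject the empty string first,
+	// since every string has "" as a prefix.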
+ case strings.HasPrefix("true", s), strings.HasPrefix("yes", s), s == "on", s == "1":
+ return true, nil
+ case strings.HasPrefix("false", s), strings.HasPrefix("no", s), strings.HasPrefix("off", s), s == "0":
+ return false, nil
+ default:
+ return false, fmt.Errorf("unknown boolean string representation %q", src)
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/box.go b/vendor/github.com/jackc/pgx/v5/pgtype/box.go
new file mode 100644
index 0000000..887d268
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/box.go
@@ -0,0 +1,238 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type BoxScanner interface {
+ ScanBox(v Box) error
+}
+
+type BoxValuer interface {
+ BoxValue() (Box, error)
+}
+
+type Box struct {
+ P [2]Vec2
+ Valid bool
+}
+
+func (b *Box) ScanBox(v Box) error {
+ *b = v
+ return nil
+}
+
+func (b Box) BoxValue() (Box, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Box) Scan(src any) error {
+ if src == nil {
+ *dst = Box{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToBoxScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Box) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := BoxCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type BoxCodec struct{}
+
+func (BoxCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (BoxCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (BoxCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(BoxValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanBoxCodecBinary{}
+ case TextFormatCode:
+ return encodePlanBoxCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanBoxCodecBinary struct{}
+
+func (encodePlanBoxCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ box, err := value.(BoxValuer).BoxValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !box.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(box.P[1].Y))
+ return buf, nil
+}
+
+type encodePlanBoxCodecText struct{}
+
+func (encodePlanBoxCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ box, err := value.(BoxValuer).BoxValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !box.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%s,%s),(%s,%s)`,
+ strconv.FormatFloat(box.P[0].X, 'f', -1, 64),
+ strconv.FormatFloat(box.P[0].Y, 'f', -1, 64),
+ strconv.FormatFloat(box.P[1].X, 'f', -1, 64),
+ strconv.FormatFloat(box.P[1].Y, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (BoxCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case BoxScanner:
+ return scanPlanBinaryBoxToBoxScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case BoxScanner:
+ return scanPlanTextAnyToBoxScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryBoxToBoxScanner struct{}
+
+func (scanPlanBinaryBoxToBoxScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BoxScanner)
+
+ if src == nil {
+ return scanner.ScanBox(Box{})
+ }
+
+ if len(src) != 32 {
+ return fmt.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ return scanner.ScanBox(Box{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToBoxScanner struct{}
+
+func (scanPlanTextAnyToBoxScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BoxScanner)
+
+ if src == nil {
+ return scanner.ScanBox(Box{})
+ }
+
+ if len(src) < 11 {
+ return fmt.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ str := string(src[1:])
+
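+	// Parse "(x1,y1),(x2,y2)": the leading '(' was dropped above, end+3 below
+	// skips the "),(" separator between the two points, and the final slice
+	// drops the trailing ')' before y2 is parsed.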
+	end := strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-1]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanBox(Box{P: [2]Vec2{{x1, y1}, {x2, y2}}, Valid: true})
+}
+
+func (c BoxCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c BoxCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var box Box
+ err := codecScan(c, m, oid, format, src, &box)
+ if err != nil {
+ return nil, err
+ }
+ return box, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go b/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go
new file mode 100644
index 0000000..b39d3fa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go
@@ -0,0 +1,952 @@
+package pgtype
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "net"
+ "net/netip"
+ "reflect"
+ "time"
+)
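+
+// The wrappers below adapt Go's builtin types to pgtype's value interfaces
+// (ScanInt64/Int64Value, ScanFloat64/Float64Value, and so on). Note the naming
+// collision: Int8 models PostgreSQL's int8, an 8-byte integer held as a Go
+// int64, while int8Wrapper wraps Go's 8-bit int8; hence the range checks in
+// ScanInt64.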
+
+type int8Wrapper int8
+
+func (w int8Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int8Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int8")
+ }
+
+ if v.Int64 < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", v.Int64)
+ }
+ if v.Int64 > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", v.Int64)
+ }
+ *w = int8Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int8Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int16Wrapper int16
+
+func (w int16Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int16Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int16")
+ }
+
+ if v.Int64 < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", v.Int64)
+ }
+ if v.Int64 > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", v.Int64)
+ }
+ *w = int16Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int16Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int32Wrapper int32
+
+func (w int32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int32")
+ }
+
+ if v.Int64 < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for int32", v.Int64)
+ }
+ if v.Int64 > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for int32", v.Int64)
+ }
+ *w = int32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int32Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type int64Wrapper int64
+
+func (w int64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *int64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int64")
+ }
+
+ *w = int64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w int64Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type intWrapper int
+
+func (w intWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *intWrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *int")
+ }
+
+ if v.Int64 < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", v.Int64)
+ }
+ if v.Int64 > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", v.Int64)
+ }
+
+ *w = intWrapper(v.Int64)
+
+ return nil
+}
+
+func (w intWrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint8Wrapper uint8
+
+func (w uint8Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint8Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint8")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", v.Int64)
+ }
+ if v.Int64 > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", v.Int64)
+ }
+ *w = uint8Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint8Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint16Wrapper uint16
+
+func (w uint16Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint16Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint16")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", v.Int64)
+ }
+ if v.Int64 > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", v.Int64)
+ }
+ *w = uint16Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint16Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint32Wrapper uint32
+
+func (w uint32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint32")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than maximum value for uint32", v.Int64)
+ }
+ *w = uint32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint32Wrapper) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+type uint64Wrapper uint64
+
+func (w uint64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uint64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint64")
+ }
+
+ if v.Int64 < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", v.Int64)
+ }
+
+ *w = uint64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w uint64Wrapper) Int64Value() (Int8, error) {
+ if uint64(w) > uint64(math.MaxInt64) {
+ return Int8{}, fmt.Errorf("%d is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *uint64Wrapper) ScanNumeric(v Numeric) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint64")
+ }
+
+ bi, err := v.toBigInt()
+ if err != nil {
+ return fmt.Errorf("cannot scan into *uint64: %w", err)
+ }
+
+ if !bi.IsUint64() {
+ return fmt.Errorf("cannot scan %v into *uint64", bi.String())
+ }
+
+ *w = uint64Wrapper(bi.Uint64())
+
+ return nil
+}
+
+func (w uint64Wrapper) NumericValue() (Numeric, error) {
+ return Numeric{Int: new(big.Int).SetUint64(uint64(w)), Valid: true}, nil
+}
+
+type uintWrapper uint
+
+func (w uintWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *uintWrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+		return fmt.Errorf("cannot scan NULL into *uint")
+ }
+
+ if v.Int64 < 0 {
+		return fmt.Errorf("%d is less than minimum value for uint", v.Int64)
+ }
+
+ if uint64(v.Int64) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", v.Int64)
+ }
+
+ *w = uintWrapper(v.Int64)
+
+ return nil
+}
+
+func (w uintWrapper) Int64Value() (Int8, error) {
+ if uint64(w) > uint64(math.MaxInt64) {
+ return Int8{}, fmt.Errorf("%d is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *uintWrapper) ScanNumeric(v Numeric) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *uint")
+ }
+
+ bi, err := v.toBigInt()
+ if err != nil {
+ return fmt.Errorf("cannot scan into *uint: %w", err)
+ }
+
+ if !bi.IsUint64() {
+ return fmt.Errorf("cannot scan %v into *uint", bi.String())
+ }
+
+ ui := bi.Uint64()
+
+ if math.MaxUint < ui {
+ return fmt.Errorf("cannot scan %v into *uint", ui)
+ }
+
+ *w = uintWrapper(ui)
+
+ return nil
+}
+
+func (w uintWrapper) NumericValue() (Numeric, error) {
+ return Numeric{Int: new(big.Int).SetUint64(uint64(w)), Valid: true}, nil
+}
+
+type float32Wrapper float32
+
+func (w float32Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *float32Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float32")
+ }
+
+ *w = float32Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w float32Wrapper) Int64Value() (Int8, error) {
+ if w > math.MaxInt64 {
+ return Int8{}, fmt.Errorf("%f is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *float32Wrapper) ScanFloat64(v Float8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float32")
+ }
+
+ *w = float32Wrapper(v.Float64)
+
+ return nil
+}
+
+func (w float32Wrapper) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(w), Valid: true}, nil
+}
+
+type float64Wrapper float64
+
+func (w float64Wrapper) SkipUnderlyingTypePlan() {}
+
+func (w *float64Wrapper) ScanInt64(v Int8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float64")
+ }
+
+ *w = float64Wrapper(v.Int64)
+
+ return nil
+}
+
+func (w float64Wrapper) Int64Value() (Int8, error) {
+ if w > math.MaxInt64 {
+ return Int8{}, fmt.Errorf("%f is greater than maximum value for int64", w)
+ }
+
+ return Int8{Int64: int64(w), Valid: true}, nil
+}
+
+func (w *float64Wrapper) ScanFloat64(v Float8) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *float64")
+ }
+
+ *w = float64Wrapper(v.Float64)
+
+ return nil
+}
+
+func (w float64Wrapper) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(w), Valid: true}, nil
+}
+
+type stringWrapper string
+
+func (w stringWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *stringWrapper) ScanText(v Text) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *string")
+ }
+
+ *w = stringWrapper(v.String)
+ return nil
+}
+
+func (w stringWrapper) TextValue() (Text, error) {
+ return Text{String: string(w), Valid: true}, nil
+}
+
+type timeWrapper time.Time
+
+func (w *timeWrapper) ScanDate(v Date) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) DateValue() (Date, error) {
+ return Date{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTimestamp(v Timestamp) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) TimestampValue() (Timestamp, error) {
+ return Timestamp{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTimestamptz(v Timestamptz) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ switch v.InfinityModifier {
+ case Finite:
+ *w = timeWrapper(v.Time)
+ return nil
+ case Infinity:
+ return fmt.Errorf("cannot scan Infinity into *time.Time")
+ case NegativeInfinity:
+ return fmt.Errorf("cannot scan -Infinity into *time.Time")
+ default:
+ return fmt.Errorf("invalid InfinityModifier: %v", v.InfinityModifier)
+ }
+}
+
+func (w timeWrapper) TimestamptzValue() (Timestamptz, error) {
+ return Timestamptz{Time: time.Time(w), Valid: true}, nil
+}
+
+func (w *timeWrapper) ScanTime(v Time) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *time.Time")
+ }
+
+ // 24:00:00 is max allowed time in PostgreSQL, but time.Time will normalize that to 00:00:00 the next day.
+ var maxRepresentableByTime int64 = 24*60*60*1000000 - 1
+ if v.Microseconds > maxRepresentableByTime {
+ return fmt.Errorf("%d microseconds cannot be represented as time.Time", v.Microseconds)
+ }
+
+ usec := v.Microseconds
+ hours := usec / microsecondsPerHour
+ usec -= hours * microsecondsPerHour
+ minutes := usec / microsecondsPerMinute
+ usec -= minutes * microsecondsPerMinute
+ seconds := usec / microsecondsPerSecond
+ usec -= seconds * microsecondsPerSecond
+ ns := usec * 1000
+ *w = timeWrapper(time.Date(2000, 1, 1, int(hours), int(minutes), int(seconds), int(ns), time.UTC))
+ return nil
+}
+
+func (w timeWrapper) TimeValue() (Time, error) {
+ t := time.Time(w)
+ usec := int64(t.Hour())*microsecondsPerHour +
+ int64(t.Minute())*microsecondsPerMinute +
+ int64(t.Second())*microsecondsPerSecond +
+ int64(t.Nanosecond())/1000
+ return Time{Microseconds: usec, Valid: true}, nil
+}
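+
+// Round-tripping through timeWrapper uses 2000-01-01 UTC as a dummy date for the
+// time-of-day and keeps microsecond precision only: TimeValue truncates
+// nanoseconds via Nanosecond()/1000, matching PostgreSQL's resolution for time.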
+
+type durationWrapper time.Duration
+
+func (w durationWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *durationWrapper) ScanInterval(v Interval) error {
+ if !v.Valid {
+		return fmt.Errorf("cannot scan NULL into *time.Duration")
+ }
+
+ us := int64(v.Months)*microsecondsPerMonth + int64(v.Days)*microsecondsPerDay + v.Microseconds
+ *w = durationWrapper(time.Duration(us) * time.Microsecond)
+ return nil
+}
+
+func (w durationWrapper) IntervalValue() (Interval, error) {
+ return Interval{Microseconds: int64(w) / 1000, Valid: true}, nil
+}
+
+type netIPNetWrapper net.IPNet
+
+func (w *netIPNetWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ return fmt.Errorf("cannot scan NULL into *net.IPNet")
+ }
+
+ *w = netIPNetWrapper{
+ IP: v.Addr().AsSlice(),
+ Mask: net.CIDRMask(v.Bits(), v.Addr().BitLen()),
+ }
+
+ return nil
+}
+func (w netIPNetWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ ip, ok := netip.AddrFromSlice(w.IP)
+ if !ok {
+ return netip.Prefix{}, errors.New("invalid net.IPNet")
+ }
+
+ ones, _ := w.Mask.Size()
+
+ return netip.PrefixFrom(ip, ones), nil
+}
+
+type netIPWrapper net.IP
+
+func (w netIPWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *netIPWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ *w = nil
+ return nil
+ }
+
+ if v.Addr().BitLen() != v.Bits() {
+ return fmt.Errorf("cannot scan %v to *net.IP", v)
+ }
+
+ *w = netIPWrapper(v.Addr().AsSlice())
+ return nil
+}
+
+func (w netIPWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ if w == nil {
+ return netip.Prefix{}, nil
+ }
+
+ addr, ok := netip.AddrFromSlice([]byte(w))
+ if !ok {
+ return netip.Prefix{}, errors.New("invalid net.IP")
+ }
+
+ return netip.PrefixFrom(addr, addr.BitLen()), nil
+}
+
+type netipPrefixWrapper netip.Prefix
+
+func (w *netipPrefixWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ *w = netipPrefixWrapper(v)
+ return nil
+}
+
+func (w netipPrefixWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ return netip.Prefix(w), nil
+}
+
+type netipAddrWrapper netip.Addr
+
+func (w *netipAddrWrapper) ScanNetipPrefix(v netip.Prefix) error {
+ if !v.IsValid() {
+ *w = netipAddrWrapper(netip.Addr{})
+ return nil
+ }
+
+ if v.Addr().BitLen() != v.Bits() {
+ return fmt.Errorf("cannot scan %v to netip.Addr", v)
+ }
+
+ *w = netipAddrWrapper(v.Addr())
+
+ return nil
+}
+
+func (w netipAddrWrapper) NetipPrefixValue() (netip.Prefix, error) {
+ addr := (netip.Addr)(w)
+ if !addr.IsValid() {
+ return netip.Prefix{}, nil
+ }
+
+ return netip.PrefixFrom(addr, addr.BitLen()), nil
+}
+
+type mapStringToPointerStringWrapper map[string]*string
+
+func (w *mapStringToPointerStringWrapper) ScanHstore(v Hstore) error {
+ *w = mapStringToPointerStringWrapper(v)
+ return nil
+}
+
+func (w mapStringToPointerStringWrapper) HstoreValue() (Hstore, error) {
+ return Hstore(w), nil
+}
+
+type mapStringToStringWrapper map[string]string
+
+func (w *mapStringToStringWrapper) ScanHstore(v Hstore) error {
+ *w = make(mapStringToStringWrapper, len(v))
+ for k, v := range v {
+ if v == nil {
+ return fmt.Errorf("cannot scan NULL to string")
+ }
+ (*w)[k] = *v
+ }
+ return nil
+}
+
+func (w mapStringToStringWrapper) HstoreValue() (Hstore, error) {
+ if w == nil {
+ return nil, nil
+ }
+
+ hstore := make(Hstore, len(w))
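+	// Copy v before taking its address: before Go 1.22 the range variable is
+	// reused on every iteration, so &v alone would alias a single variable.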
+ for k, v := range w {
+ s := v
+ hstore[k] = &s
+ }
+ return hstore, nil
+}
+
+type fmtStringerWrapper struct {
+ s fmt.Stringer
+}
+
+func (w fmtStringerWrapper) TextValue() (Text, error) {
+ return Text{String: w.s.String(), Valid: true}, nil
+}
+
+type byte16Wrapper [16]byte
+
+func (w *byte16Wrapper) ScanUUID(v UUID) error {
+ if !v.Valid {
+ return fmt.Errorf("cannot scan NULL into *[16]byte")
+ }
+ *w = byte16Wrapper(v.Bytes)
+ return nil
+}
+
+func (w byte16Wrapper) UUIDValue() (UUID, error) {
+ return UUID{Bytes: [16]byte(w), Valid: true}, nil
+}
+
+type byteSliceWrapper []byte
+
+func (w byteSliceWrapper) SkipUnderlyingTypePlan() {}
+
+func (w *byteSliceWrapper) ScanText(v Text) error {
+ if !v.Valid {
+ *w = nil
+ return nil
+ }
+
+ *w = byteSliceWrapper(v.String)
+ return nil
+}
+
+func (w byteSliceWrapper) TextValue() (Text, error) {
+ if w == nil {
+ return Text{}, nil
+ }
+
+ return Text{String: string(w), Valid: true}, nil
+}
+
+func (w *byteSliceWrapper) ScanUUID(v UUID) error {
+ if !v.Valid {
+ *w = nil
+ return nil
+ }
+ *w = make(byteSliceWrapper, 16)
+ copy(*w, v.Bytes[:])
+ return nil
+}
+
+func (w byteSliceWrapper) UUIDValue() (UUID, error) {
+ if w == nil {
+ return UUID{}, nil
+ }
+
+ uuid := UUID{Valid: true}
+ copy(uuid.Bytes[:], w)
+ return uuid, nil
+}
+
+// structWrapper implements CompositeIndexGetter for a struct.
+type structWrapper struct {
+ s any
+ exportedFields []reflect.Value
+}
+
+func (w structWrapper) IsNull() bool {
+ return w.s == nil
+}
+
+func (w structWrapper) Index(i int) any {
+ if i >= len(w.exportedFields) {
+ return fmt.Errorf("%#v only has %d public fields - %d is out of bounds", w.s, len(w.exportedFields), i)
+ }
+
+ return w.exportedFields[i].Interface()
+}
+
+// ptrStructWrapper implements CompositeIndexScanner for a pointer to a struct.
+type ptrStructWrapper struct {
+ s any
+ exportedFields []reflect.Value
+}
+
+func (w *ptrStructWrapper) ScanNull() error {
+ return fmt.Errorf("cannot scan NULL into %#v", w.s)
+}
+
+func (w *ptrStructWrapper) ScanIndex(i int) any {
+ if i >= len(w.exportedFields) {
+ return fmt.Errorf("%#v only has %d public fields - %d is out of bounds", w.s, len(w.exportedFields), i)
+ }
+
+ return w.exportedFields[i].Addr().Interface()
+}
+
+type anySliceArrayReflect struct {
+ slice reflect.Value
+}
+
+func (a anySliceArrayReflect) Dimensions() []ArrayDimension {
+ if a.slice.IsNil() {
+ return nil
+ }
+
+ return []ArrayDimension{{Length: int32(a.slice.Len()), LowerBound: 1}}
+}
+
+func (a anySliceArrayReflect) Index(i int) any {
+ return a.slice.Index(i).Interface()
+}
+
+func (a anySliceArrayReflect) IndexType() any {
+ return reflect.New(a.slice.Type().Elem()).Elem().Interface()
+}
+
+func (a *anySliceArrayReflect) SetDimensions(dimensions []ArrayDimension) error {
+ sliceType := a.slice.Type()
+
+ if dimensions == nil {
+ a.slice.Set(reflect.Zero(sliceType))
+ return nil
+ }
+
+ elementCount := cardinality(dimensions)
+ slice := reflect.MakeSlice(sliceType, elementCount, elementCount)
+ a.slice.Set(slice)
+ return nil
+}
+
+func (a *anySliceArrayReflect) ScanIndex(i int) any {
+ return a.slice.Index(i).Addr().Interface()
+}
+
+func (a *anySliceArrayReflect) ScanIndexType() any {
+ return reflect.New(a.slice.Type().Elem()).Interface()
+}
+
+type anyMultiDimSliceArray struct {
+ slice reflect.Value
+ dims []ArrayDimension
+}
+
+func (a *anyMultiDimSliceArray) Dimensions() []ArrayDimension {
+ if a.slice.IsNil() {
+ return nil
+ }
+
+ s := a.slice
+ for {
+ a.dims = append(a.dims, ArrayDimension{Length: int32(s.Len()), LowerBound: 1})
+ if s.Len() > 0 {
+ s = s.Index(0)
+ } else {
+ break
+ }
+		if s.Type().Kind() != reflect.Slice {
+			break
+		}
+ }
+
+ return a.dims
+}
+
+func (a *anyMultiDimSliceArray) Index(i int) any {
+ if len(a.dims) == 1 {
+ return a.slice.Index(i).Interface()
+ }
+
+ indexes := make([]int, len(a.dims))
+ for j := len(a.dims) - 1; j >= 0; j-- {
+ dimLen := int(a.dims[j].Length)
+ indexes[j] = i % dimLen
+ i = i / dimLen
+ }
+
+ v := a.slice
+ for _, si := range indexes {
+ v = v.Index(si)
+ }
+
+ return v.Interface()
+}
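+
+// Index maps a flat element number onto nested slice coordinates in row-major
+// order. For example, with dimensions of lengths [2, 3], i = 4 yields indexes
+// [1, 1] (4 % 3 = 1, then 4 / 3 = 1), i.e. the element at slice[1][1].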
+
+func (a *anyMultiDimSliceArray) IndexType() any {
+ lowestSliceType := a.slice.Type()
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ }
+ return reflect.New(lowestSliceType.Elem()).Elem().Interface()
+}
+
+func (a *anyMultiDimSliceArray) SetDimensions(dimensions []ArrayDimension) error {
+ sliceType := a.slice.Type()
+
+ if dimensions == nil {
+ a.slice.Set(reflect.Zero(sliceType))
+ return nil
+ }
+
+ switch len(dimensions) {
+ case 0:
+ // Empty, but non-nil array
+ slice := reflect.MakeSlice(sliceType, 0, 0)
+ a.slice.Set(slice)
+ return nil
+ case 1:
+ elementCount := cardinality(dimensions)
+ slice := reflect.MakeSlice(sliceType, elementCount, elementCount)
+ a.slice.Set(slice)
+ return nil
+ default:
+ sliceDimensionCount := 1
+ lowestSliceType := sliceType
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ sliceDimensionCount++
+ }
+
+ if sliceDimensionCount != len(dimensions) {
+ return fmt.Errorf("PostgreSQL array has %d dimensions but slice has %d dimensions", len(dimensions), sliceDimensionCount)
+ }
+
+ elementCount := cardinality(dimensions)
+ flatSlice := reflect.MakeSlice(lowestSliceType, elementCount, elementCount)
+
+ multiDimSlice := a.makeMultidimensionalSlice(sliceType, dimensions, flatSlice, 0)
+ a.slice.Set(multiDimSlice)
+
+		// Now that a.slice is a multi-dimensional slice with the underlying data pointed at flatSlice, change a.slice
+		// to flatSlice so ScanIndex only has to handle simple one-dimensional slices.
+ a.slice = flatSlice
+
+ return nil
+ }
+}
+
+func (a *anyMultiDimSliceArray) makeMultidimensionalSlice(sliceType reflect.Type, dimensions []ArrayDimension, flatSlice reflect.Value, flatSliceIdx int) reflect.Value {
+ if len(dimensions) == 1 {
+ endIdx := flatSliceIdx + int(dimensions[0].Length)
+ return flatSlice.Slice3(flatSliceIdx, endIdx, endIdx)
+ }
+
+	// The stride between consecutive children is the number of elements each
+	// child subtree holds, i.e. the product of all remaining dimensions; using
+	// dimensions[1].Length alone would interleave children for arrays of three
+	// or more dimensions.
+	elemsPerChild := 1
+	for _, d := range dimensions[1:] {
+		elemsPerChild *= int(d.Length)
+	}
+
+	sliceLen := int(dimensions[0].Length)
+	slice := reflect.MakeSlice(sliceType, sliceLen, sliceLen)
+	for i := 0; i < sliceLen; i++ {
+		subSlice := a.makeMultidimensionalSlice(sliceType.Elem(), dimensions[1:], flatSlice, flatSliceIdx+i*elemsPerChild)
+ slice.Index(i).Set(subSlice)
+ }
+
+ return slice
+}
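+
+// Every leaf returned by makeMultidimensionalSlice is a view into the same flat
+// backing slice (Slice3 caps each leaf at its own length), which is why
+// SetDimensions can point a.slice at flatSlice afterwards and let ScanIndex
+// address elements through simple one-dimensional indexing.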
+
+func (a *anyMultiDimSliceArray) ScanIndex(i int) any {
+ return a.slice.Index(i).Addr().Interface()
+}
+
+func (a *anyMultiDimSliceArray) ScanIndexType() any {
+ lowestSliceType := a.slice.Type()
+ for ; lowestSliceType.Elem().Kind() == reflect.Slice; lowestSliceType = lowestSliceType.Elem() {
+ }
+ return reflect.New(lowestSliceType.Elem()).Interface()
+}
+
+type anyArrayArrayReflect struct {
+ array reflect.Value
+}
+
+func (a anyArrayArrayReflect) Dimensions() []ArrayDimension {
+ return []ArrayDimension{{Length: int32(a.array.Len()), LowerBound: 1}}
+}
+
+func (a anyArrayArrayReflect) Index(i int) any {
+ return a.array.Index(i).Interface()
+}
+
+func (a anyArrayArrayReflect) IndexType() any {
+ return reflect.New(a.array.Type().Elem()).Elem().Interface()
+}
+
+func (a *anyArrayArrayReflect) SetDimensions(dimensions []ArrayDimension) error {
+ if dimensions == nil {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan NULL into %v", a.array.Type().String())
+ }
+
+ if len(dimensions) != 1 {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan multi-dimensional array into %v", a.array.Type().String())
+ }
+
+ if int(dimensions[0].Length) != a.array.Len() {
+ return fmt.Errorf("anyArrayArrayReflect: cannot scan array with length %v into %v", dimensions[0].Length, a.array.Type().String())
+ }
+
+ return nil
+}
+
+func (a *anyArrayArrayReflect) ScanIndex(i int) any {
+ return a.array.Index(i).Addr().Interface()
+}
+
+func (a *anyArrayArrayReflect) ScanIndexType() any {
+ return reflect.New(a.array.Type().Elem()).Interface()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go b/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go
new file mode 100644
index 0000000..a247705
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/bytea.go
@@ -0,0 +1,255 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+)
+
+type BytesScanner interface {
+ // ScanBytes receives a byte slice of driver memory that is only valid until the next database method call.
+ ScanBytes(v []byte) error
+}
+
+type BytesValuer interface {
+ // BytesValue returns a byte slice of the byte data. The caller must not change the returned slice.
+ BytesValue() ([]byte, error)
+}
+
+// DriverBytes is a byte slice that holds a reference to memory owned by the driver. It is only valid from the time it
+// is scanned until Rows.Next or Rows.Close is called. It is never safe to use DriverBytes with QueryRow as Row.Scan
+// internally calls Rows.Close before returning.
+type DriverBytes []byte
+
+func (b *DriverBytes) ScanBytes(v []byte) error {
+ *b = v
+ return nil
+}
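+
+// A minimal sketch (assuming a *pgx.Conn named conn and a context ctx; process
+// is a placeholder): the bytes must be consumed inside the rows loop, before
+// the next call to Next or Close.
+//
+//	rows, _ := conn.Query(ctx, "select payload from blobs")
+//	for rows.Next() {
+//		var b DriverBytes
+//		if err := rows.Scan(&b); err == nil {
+//			process(b) // must not retain b past this iteration
+//		}
+//	}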
+
+// PreallocBytes is a byte slice of preallocated memory that scanned bytes will be copied to. If it is too small, a
+// new slice will be allocated.
+type PreallocBytes []byte
+
+func (b *PreallocBytes) ScanBytes(v []byte) error {
+ if v == nil {
+ *b = nil
+ return nil
+ }
+
+ if len(v) <= len(*b) {
+ *b = (*b)[:len(v)]
+ } else {
+ *b = make(PreallocBytes, len(v))
+ }
+ copy(*b, v)
+ return nil
+}
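+
+// A minimal sketch (assuming conn and ctx as above): reuse one buffer across
+// queries to avoid a per-row allocation whenever the value fits; larger values
+// still allocate a new slice.
+//
+//	buf := make(PreallocBytes, 4096)
+//	err := conn.QueryRow(ctx, "select payload from blobs where id = 1").Scan(&buf)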
+
+// UndecodedBytes can be used as a scan target to get the raw bytes from PostgreSQL without any decoding.
+type UndecodedBytes []byte
+
+type scanPlanAnyToUndecodedBytes struct{}
+
+func (scanPlanAnyToUndecodedBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*UndecodedBytes)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type ByteaCodec struct{}
+
+func (ByteaCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (ByteaCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (ByteaCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case []byte:
+ return encodePlanBytesCodecBinaryBytes{}
+ case BytesValuer:
+ return encodePlanBytesCodecBinaryBytesValuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case []byte:
+ return encodePlanBytesCodecTextBytes{}
+ case BytesValuer:
+ return encodePlanBytesCodecTextBytesValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanBytesCodecBinaryBytes struct{}
+
+func (encodePlanBytesCodecBinaryBytes) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.([]byte)
+ if b == nil {
+ return nil, nil
+ }
+
+ return append(buf, b...), nil
+}
+
+type encodePlanBytesCodecBinaryBytesValuer struct{}
+
+func (encodePlanBytesCodecBinaryBytesValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BytesValuer).BytesValue()
+ if err != nil {
+ return nil, err
+ }
+ if b == nil {
+ return nil, nil
+ }
+
+ return append(buf, b...), nil
+}
+
+type encodePlanBytesCodecTextBytes struct{}
+
+func (encodePlanBytesCodecTextBytes) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.([]byte)
+ if b == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(b)...)
+ return buf, nil
+}
+
+type encodePlanBytesCodecTextBytesValuer struct{}
+
+func (encodePlanBytesCodecTextBytesValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b, err := value.(BytesValuer).BytesValue()
+ if err != nil {
+ return nil, err
+ }
+ if b == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(b)...)
+ return buf, nil
+}
+
+func (ByteaCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *[]byte:
+ return scanPlanBinaryBytesToBytes{}
+ case BytesScanner:
+ return scanPlanBinaryBytesToBytesScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *[]byte:
+ return scanPlanTextByteaToBytes{}
+ case BytesScanner:
+ return scanPlanTextByteaToBytesScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryBytesToBytes struct{}
+
+func (scanPlanBinaryBytesToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanBinaryBytesToBytesScanner struct{}
+
+func (scanPlanBinaryBytesToBytesScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BytesScanner)
+ return scanner.ScanBytes(src)
+}
+
+type scanPlanTextByteaToBytes struct{}
+
+func (scanPlanTextByteaToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ buf, err := decodeHexBytea(src)
+ if err != nil {
+ return err
+ }
+ *dstBuf = buf
+
+ return nil
+}
+
+type scanPlanTextByteaToBytesScanner struct{}
+
+func (scanPlanTextByteaToBytesScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BytesScanner)
+ buf, err := decodeHexBytea(src)
+ if err != nil {
+ return err
+ }
+ return scanner.ScanBytes(buf)
+}
+
+func decodeHexBytea(src []byte) ([]byte, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if len(src) < 2 || src[0] != '\\' || src[1] != 'x' {
+ return nil, fmt.Errorf("invalid hex format")
+ }
+
+ buf := make([]byte, (len(src)-2)/2)
+ _, err := hex.Decode(buf, src[2:])
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
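+
+// decodeHexBytea only understands PostgreSQL's hex output, e.g. `\x6869`
+// decodes to []byte("hi"); the legacy "escape" bytea format fails the \x
+// prefix check above.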
+
+func (c ByteaCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c ByteaCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var buf []byte
+ err := codecScan(c, m, oid, format, src, &buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/circle.go b/vendor/github.com/jackc/pgx/v5/pgtype/circle.go
new file mode 100644
index 0000000..e8f118c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/circle.go
@@ -0,0 +1,222 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type CircleScanner interface {
+ ScanCircle(v Circle) error
+}
+
+type CircleValuer interface {
+ CircleValue() (Circle, error)
+}
+
+type Circle struct {
+ P Vec2
+ R float64
+ Valid bool
+}
+
+func (c *Circle) ScanCircle(v Circle) error {
+ *c = v
+ return nil
+}
+
+func (c Circle) CircleValue() (Circle, error) {
+ return c, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Circle) Scan(src any) error {
+ if src == nil {
+ *dst = Circle{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToCircleScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Circle) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := CircleCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type CircleCodec struct{}
+
+func (CircleCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (CircleCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (CircleCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(CircleValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanCircleCodecBinary{}
+ case TextFormatCode:
+ return encodePlanCircleCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanCircleCodecBinary struct{}
+
+func (encodePlanCircleCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ circle, err := value.(CircleValuer).CircleValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !circle.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.P.Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(circle.R))
+ return buf, nil
+}
+
+type encodePlanCircleCodecText struct{}
+
+func (encodePlanCircleCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ circle, err := value.(CircleValuer).CircleValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !circle.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`<(%s,%s),%s>`,
+ strconv.FormatFloat(circle.P.X, 'f', -1, 64),
+ strconv.FormatFloat(circle.P.Y, 'f', -1, 64),
+ strconv.FormatFloat(circle.R, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (CircleCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case CircleScanner:
+ return scanPlanBinaryCircleToCircleScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case CircleScanner:
+ return scanPlanTextAnyToCircleScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c CircleCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c CircleCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var circle Circle
+ err := codecScan(c, m, oid, format, src, &circle)
+ if err != nil {
+ return nil, err
+ }
+ return circle, nil
+}
+
+type scanPlanBinaryCircleToCircleScanner struct{}
+
+func (scanPlanBinaryCircleToCircleScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(CircleScanner)
+
+ if src == nil {
+ return scanner.ScanCircle(Circle{})
+ }
+
+ if len(src) != 24 {
+ return fmt.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+ r := binary.BigEndian.Uint64(src[16:])
+
+ return scanner.ScanCircle(Circle{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ R: math.Float64frombits(r),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToCircleScanner struct{}
+
+func (scanPlanTextAnyToCircleScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(CircleScanner)
+
+ if src == nil {
+ return scanner.ScanCircle(Circle{})
+ }
+
+ if len(src) < 9 {
+ return fmt.Errorf("invalid length for Circle: %v", len(src))
+ }
+
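+	// Parse "<(x,y),r>": src[2:] below drops the leading "<(", end+2 skips the
+	// ")," between the point and the radius, and the final slice drops the
+	// trailing '>'.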
+ str := string(src[2:])
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+2 : len(str)-1]
+
+ r, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanCircle(Circle{P: Vec2{x, y}, R: r, Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/composite.go b/vendor/github.com/jackc/pgx/v5/pgtype/composite.go
new file mode 100644
index 0000000..fb37232
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/composite.go
@@ -0,0 +1,602 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// CompositeIndexGetter is a type accessed by index that can be converted into a PostgreSQL composite.
+type CompositeIndexGetter interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // Index returns the element at i.
+ Index(i int) any
+}
+
+// CompositeIndexScanner is a type accessed by index that can be scanned from a PostgreSQL composite.
+type CompositeIndexScanner interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // ScanIndex returns a value usable as a scan target for i.
+ ScanIndex(i int) any
+}
+
+type CompositeCodecField struct {
+ Name string
+ Type *Type
+}
+
+type CompositeCodec struct {
+ Fields []CompositeCodecField
+}
+
+func (c *CompositeCodec) FormatSupported(format int16) bool {
+ for _, f := range c.Fields {
+ if !f.Type.Codec.FormatSupported(format) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *CompositeCodec) PreferredFormat() int16 {
+ if c.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *CompositeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(CompositeIndexGetter); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanCompositeCodecCompositeIndexGetterToBinary{cc: c, m: m}
+ case TextFormatCode:
+ return &encodePlanCompositeCodecCompositeIndexGetterToText{cc: c, m: m}
+ }
+
+ return nil
+}
+
+type encodePlanCompositeCodecCompositeIndexGetterToBinary struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *encodePlanCompositeCodecCompositeIndexGetterToBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(CompositeIndexGetter)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ builder := NewCompositeBinaryBuilder(plan.m, buf)
+ for i, field := range plan.cc.Fields {
+ builder.AppendValue(field.Type.OID, getter.Index(i))
+ }
+
+ return builder.Finish()
+}
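+
+// PostgreSQL's binary composite format, as assembled by the builder above, is an
+// int32 field count followed by, for each field, a uint32 element OID, an int32
+// value length (-1 for NULL), and the value bytes.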
+
+type encodePlanCompositeCodecCompositeIndexGetterToText struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *encodePlanCompositeCodecCompositeIndexGetterToText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(CompositeIndexGetter)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ b := NewCompositeTextBuilder(plan.m, buf)
+ for i, field := range plan.cc.Fields {
+ b.AppendValue(field.Type.OID, getter.Index(i))
+ }
+
+ return b.Finish()
+}
+
+func (c *CompositeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanBinaryCompositeToCompositeIndexScanner{cc: c, m: m}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanTextCompositeToCompositeIndexScanner{cc: c, m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryCompositeToCompositeIndexScanner struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *scanPlanBinaryCompositeToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeBinaryScanner(plan.m, src)
+ for i, field := range plan.cc.Fields {
+ if scanner.Next() {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(field.Type.OID, BinaryFormatCode, fieldTarget)
+ if fieldPlan == nil {
+					return fmt.Errorf("unable to scan OID %d in binary format into %v", field.Type.OID, fieldTarget)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New("read past end of composite")
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type scanPlanTextCompositeToCompositeIndexScanner struct {
+ cc *CompositeCodec
+ m *Map
+}
+
+func (plan *scanPlanTextCompositeToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeTextScanner(plan.m, src)
+ for i, field := range plan.cc.Fields {
+ if scanner.Next() {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(field.Type.OID, TextFormatCode, fieldTarget)
+ if fieldPlan == nil {
+					return fmt.Errorf("unable to scan OID %d in text format into %v", field.Type.OID, fieldTarget)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New("read past end of composite")
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *CompositeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *CompositeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ scanner := NewCompositeTextScanner(m, src)
+ values := make(map[string]any, len(c.Fields))
+ for i := 0; scanner.Next() && i < len(c.Fields); i++ {
+ var v any
+ fieldPlan := m.PlanScan(c.Fields[i].Type.OID, TextFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in text format into %v", c.Fields[i].Type.OID, v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[c.Fields[i].Name] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ case BinaryFormatCode:
+ scanner := NewCompositeBinaryScanner(m, src)
+ values := make(map[string]any, len(c.Fields))
+ for i := 0; scanner.Next() && i < len(c.Fields); i++ {
+ var v any
+ fieldPlan := m.PlanScan(scanner.OID(), BinaryFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[c.Fields[i].Name] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+
+}
+
+type CompositeBinaryScanner struct {
+ m *Map
+ rp int
+ src []byte
+
+ fieldCount int32
+ fieldBytes []byte
+ fieldOID uint32
+ err error
+}
+
+// NewCompositeBinaryScanner creates a scanner over a binary encoded composite value.
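+// The binary representation is an int32 field count followed by each field as a
+// uint32 OID, an int32 value length (-1 denotes NULL), and the value bytes.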
+func NewCompositeBinaryScanner(m *Map, src []byte) *CompositeBinaryScanner {
+ rp := 0
+ if len(src[rp:]) < 4 {
+ return &CompositeBinaryScanner{err: fmt.Errorf("Record incomplete %v", src)}
+ }
+
+ fieldCount := int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ return &CompositeBinaryScanner{
+ m: m,
+ rp: rp,
+ src: src,
+ fieldCount: fieldCount,
+ }
+}
+
+// Next advances the scanner to the next field. It returns false after the last field is read or an error occurs. After
+// Next returns false, the Err method can be called to check if any errors occurred.
+func (cfs *CompositeBinaryScanner) Next() bool {
+ if cfs.err != nil {
+ return false
+ }
+
+ if cfs.rp == len(cfs.src) {
+ return false
+ }
+
+ if len(cfs.src[cfs.rp:]) < 8 {
+ cfs.err = fmt.Errorf("Record incomplete %v", cfs.src)
+ return false
+ }
+ cfs.fieldOID = binary.BigEndian.Uint32(cfs.src[cfs.rp:])
+ cfs.rp += 4
+
+ fieldLen := int(int32(binary.BigEndian.Uint32(cfs.src[cfs.rp:])))
+ cfs.rp += 4
+
+ if fieldLen >= 0 {
+ if len(cfs.src[cfs.rp:]) < fieldLen {
+ cfs.err = fmt.Errorf("Record incomplete rp=%d src=%v", cfs.rp, cfs.src)
+ return false
+ }
+ cfs.fieldBytes = cfs.src[cfs.rp : cfs.rp+fieldLen]
+ cfs.rp += fieldLen
+ } else {
+ cfs.fieldBytes = nil
+ }
+
+ return true
+}
+
+func (cfs *CompositeBinaryScanner) FieldCount() int {
+ return int(cfs.fieldCount)
+}
+
+// Bytes returns the bytes of the field most recently read by Next().
+func (cfs *CompositeBinaryScanner) Bytes() []byte {
+ return cfs.fieldBytes
+}
+
+// OID returns the OID of the field most recently read by Next().
+func (cfs *CompositeBinaryScanner) OID() uint32 {
+ return cfs.fieldOID
+}
+
+// Err returns any error encountered by the scanner.
+func (cfs *CompositeBinaryScanner) Err() error {
+ return cfs.err
+}
+
+type CompositeTextScanner struct {
+ m *Map
+ rp int
+ src []byte
+
+ fieldBytes []byte
+ err error
+}
+
+// NewCompositeTextScanner creates a scanner over a text encoded composite value.
+func NewCompositeTextScanner(m *Map, src []byte) *CompositeTextScanner {
+ if len(src) < 2 {
+ return &CompositeTextScanner{err: fmt.Errorf("Record incomplete %v", src)}
+ }
+
+ if src[0] != '(' {
+ return &CompositeTextScanner{err: fmt.Errorf("composite text format must start with '('")}
+ }
+
+ if src[len(src)-1] != ')' {
+ return &CompositeTextScanner{err: fmt.Errorf("composite text format must end with ')'")}
+ }
+
+ return &CompositeTextScanner{
+ m: m,
+ rp: 1,
+ src: src,
+ }
+}
+
+// Next advances the scanner to the next field. It returns false after the last field is read or an error occurs. After
+// Next returns false, the Err method can be called to check if any errors occurred.
+func (cfs *CompositeTextScanner) Next() bool {
+ if cfs.err != nil {
+ return false
+ }
+
+ if cfs.rp == len(cfs.src) {
+ return false
+ }
+
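+	// Fields are separated by commas; an immediate ',' or ')' means the field is
+	// NULL. Quoted fields may contain doubled quotes ("") and backslash escapes,
+	// while unquoted fields run to the next ',' or ')'.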
+ switch cfs.src[cfs.rp] {
+ case ',', ')': // null
+ cfs.rp++
+ cfs.fieldBytes = nil
+ return true
+ case '"': // quoted value
+ cfs.rp++
+ cfs.fieldBytes = make([]byte, 0, 16)
+ for {
+ ch := cfs.src[cfs.rp]
+
+ if ch == '"' {
+ cfs.rp++
+ if cfs.src[cfs.rp] == '"' {
+ cfs.fieldBytes = append(cfs.fieldBytes, '"')
+ cfs.rp++
+ } else {
+ break
+ }
+ } else if ch == '\\' {
+ cfs.rp++
+ cfs.fieldBytes = append(cfs.fieldBytes, cfs.src[cfs.rp])
+ cfs.rp++
+ } else {
+ cfs.fieldBytes = append(cfs.fieldBytes, ch)
+ cfs.rp++
+ }
+ }
+ cfs.rp++
+ return true
+ default: // unquoted value
+ start := cfs.rp
+ for {
+ ch := cfs.src[cfs.rp]
+ if ch == ',' || ch == ')' {
+ break
+ }
+ cfs.rp++
+ }
+ cfs.fieldBytes = cfs.src[start:cfs.rp]
+ cfs.rp++
+ return true
+ }
+}
+
+// Bytes returns the bytes of the field most recently read by Next().
+func (cfs *CompositeTextScanner) Bytes() []byte {
+ return cfs.fieldBytes
+}
+
+// Err returns any error encountered by the scanner.
+func (cfs *CompositeTextScanner) Err() error {
+ return cfs.err
+}
+
+type CompositeBinaryBuilder struct {
+ m *Map
+ buf []byte
+ startIdx int
+ fieldCount uint32
+ err error
+}
+
+func NewCompositeBinaryBuilder(m *Map, buf []byte) *CompositeBinaryBuilder {
+ startIdx := len(buf)
+ buf = append(buf, 0, 0, 0, 0) // allocate room for number of fields
+ return &CompositeBinaryBuilder{m: m, buf: buf, startIdx: startIdx}
+}
+
+func (b *CompositeBinaryBuilder) AppendValue(oid uint32, field any) {
+ if b.err != nil {
+ return
+ }
+
+ if field == nil {
+ b.buf = pgio.AppendUint32(b.buf, oid)
+ b.buf = pgio.AppendInt32(b.buf, -1)
+ b.fieldCount++
+ return
+ }
+
+ plan := b.m.PlanEncode(oid, BinaryFormatCode, field)
+ if plan == nil {
+ b.err = fmt.Errorf("unable to encode %v into OID %d in binary format", field, oid)
+ return
+ }
+
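+	// Write the OID, then reserve a 4-byte length placeholder initialized to -1.
+	// After encoding, the placeholder is patched with the actual value length; if
+	// Encode returns a nil buffer the value is NULL and the -1 sentinel remains.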
+ b.buf = pgio.AppendUint32(b.buf, oid)
+ lengthPos := len(b.buf)
+ b.buf = pgio.AppendInt32(b.buf, -1)
+ fieldBuf, err := plan.Encode(field, b.buf)
+ if err != nil {
+ b.err = err
+ return
+ }
+ if fieldBuf != nil {
+ binary.BigEndian.PutUint32(fieldBuf[lengthPos:], uint32(len(fieldBuf)-len(b.buf)))
+ b.buf = fieldBuf
+ }
+
+ b.fieldCount++
+}
+
+func (b *CompositeBinaryBuilder) Finish() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+
+ binary.BigEndian.PutUint32(b.buf[b.startIdx:], b.fieldCount)
+ return b.buf, nil
+}
+
+type CompositeTextBuilder struct {
+ m *Map
+ buf []byte
+ startIdx int
+ fieldCount uint32
+ err error
+ fieldBuf [32]byte
+}
+
+func NewCompositeTextBuilder(m *Map, buf []byte) *CompositeTextBuilder {
+	buf = append(buf, '(') // open the composite literal; fields are appended after it
+ return &CompositeTextBuilder{m: m, buf: buf}
+}
+
+func (b *CompositeTextBuilder) AppendValue(oid uint32, field any) {
+ if b.err != nil {
+ return
+ }
+
+ if field == nil {
+ b.buf = append(b.buf, ',')
+ return
+ }
+
+ plan := b.m.PlanEncode(oid, TextFormatCode, field)
+ if plan == nil {
+ b.err = fmt.Errorf("unable to encode %v into OID %d in text format", field, oid)
+ return
+ }
+
+ fieldBuf, err := plan.Encode(field, b.fieldBuf[0:0])
+ if err != nil {
+ b.err = err
+ return
+ }
+ if fieldBuf != nil {
+ b.buf = append(b.buf, quoteCompositeFieldIfNeeded(string(fieldBuf))...)
+ }
+
+ b.buf = append(b.buf, ',')
+}
+
+func (b *CompositeTextBuilder) Finish() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+
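+	// NewCompositeTextBuilder wrote the opening parenthesis and every AppendValue
+	// call left a trailing comma, so overwrite the final comma with ')'.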
+ b.buf[len(b.buf)-1] = ')'
+ return b.buf, nil
+}
+
+var quoteCompositeReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
+
+func quoteCompositeField(src string) string {
+ return `"` + quoteCompositeReplacer.Replace(src) + `"`
+}
+
+func quoteCompositeFieldIfNeeded(src string) string {
+ if src == "" || src[0] == ' ' || src[len(src)-1] == ' ' || strings.ContainsAny(src, `(),"\`) {
+ return quoteCompositeField(src)
+ }
+ return src
+}
+
+// CompositeFields represents the values of a composite value. It can be used as an encoding source or as a scan target.
+// It cannot scan a NULL, but the composite fields can be NULL.
+type CompositeFields []any
+
+func (cf CompositeFields) SkipUnderlyingTypePlan() {}
+
+func (cf CompositeFields) IsNull() bool {
+ return cf == nil
+}
+
+func (cf CompositeFields) Index(i int) any {
+ return cf[i]
+}
+
+func (cf CompositeFields) ScanNull() error {
+ return fmt.Errorf("cannot scan NULL into CompositeFields")
+}
+
+func (cf CompositeFields) ScanIndex(i int) any {
+ return cf[i]
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/convert.go b/vendor/github.com/jackc/pgx/v5/pgtype/convert.go
new file mode 100644
index 0000000..8a9cee9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/convert.go
@@ -0,0 +1,108 @@
+package pgtype
+
+import (
+ "reflect"
+)
+
+func NullAssignTo(dst any) error {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return &nullAssignmentError{dst: dst}
+ }
+
+ dstVal := dstPtr.Elem()
+
+ switch dstVal.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+ return nil
+ }
+
+ return &nullAssignmentError{dst: dst}
+}
+
+var kindTypes map[reflect.Kind]reflect.Type
+
+func toInterface(dst reflect.Value, t reflect.Type) (any, bool) {
+ nextDst := dst.Convert(t)
+ return nextDst.Interface(), dst.Type() != nextDst.Type()
+}
+
+// GetAssignToDstType attempts to convert dst to something AssignTo can assign
+// to. If dst is a pointer to pointer it allocates a value and returns the
+// dereferenced pointer. If dst is a named type such as *Foo where Foo is type
+// Foo int16, it converts dst to *int16.
+//
+// GetAssignToDstType returns the converted dst and a bool representing if any
+// change was made.
+func GetAssignToDstType(dst any) (any, bool) {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return nil, false
+ }
+
+ dstVal := dstPtr.Elem()
+
+	// if dst is a pointer to pointer, allocate space and try again with the dereferenced pointer
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal.Set(reflect.New(dstVal.Type().Elem()))
+ return dstVal.Interface(), true
+ }
+
+ // if dst is pointer to a base type that has been renamed
+ if baseValType, ok := kindTypes[dstVal.Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(baseValType))
+ }
+
+ if dstVal.Kind() == reflect.Slice {
+ if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.SliceOf(baseElemType)))
+ }
+ }
+
+ if dstVal.Kind() == reflect.Array {
+ if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(dstVal.Len(), baseElemType)))
+ }
+ }
+
+ if dstVal.Kind() == reflect.Struct {
+ if dstVal.Type().NumField() == 1 && dstVal.Type().Field(0).Anonymous {
+ dstPtr = dstVal.Field(0).Addr()
+ nested := dstVal.Type().Field(0).Type
+ if nested.Kind() == reflect.Array {
+ if baseElemType, ok := kindTypes[nested.Elem().Kind()]; ok {
+ return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(nested.Len(), baseElemType)))
+ }
+ }
+ if _, ok := kindTypes[nested.Kind()]; ok && dstPtr.CanInterface() {
+ return dstPtr.Interface(), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func init() {
+ kindTypes = map[reflect.Kind]reflect.Type{
+ reflect.Bool: reflect.TypeOf(false),
+ reflect.Float32: reflect.TypeOf(float32(0)),
+ reflect.Float64: reflect.TypeOf(float64(0)),
+ reflect.Int: reflect.TypeOf(int(0)),
+ reflect.Int8: reflect.TypeOf(int8(0)),
+ reflect.Int16: reflect.TypeOf(int16(0)),
+ reflect.Int32: reflect.TypeOf(int32(0)),
+ reflect.Int64: reflect.TypeOf(int64(0)),
+ reflect.Uint: reflect.TypeOf(uint(0)),
+ reflect.Uint8: reflect.TypeOf(uint8(0)),
+ reflect.Uint16: reflect.TypeOf(uint16(0)),
+ reflect.Uint32: reflect.TypeOf(uint32(0)),
+ reflect.Uint64: reflect.TypeOf(uint64(0)),
+ reflect.String: reflect.TypeOf(""),
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/date.go b/vendor/github.com/jackc/pgx/v5/pgtype/date.go
new file mode 100644
index 0000000..784b16d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/date.go
@@ -0,0 +1,351 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type DateScanner interface {
+ ScanDate(v Date) error
+}
+
+type DateValuer interface {
+ DateValue() (Date, error)
+}
+
+type Date struct {
+ Time time.Time
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (d *Date) ScanDate(v Date) error {
+ *d = v
+ return nil
+}
+
+func (d Date) DateValue() (Date, error) {
+ return d, nil
+}
+
+const (
+ negativeInfinityDayOffset = -2147483648
+ infinityDayOffset = 2147483647
+)
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Date) Scan(src any) error {
+ if src == nil {
+ *dst = Date{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToDateScanner{}.Scan([]byte(src), dst)
+ case time.Time:
+ *dst = Date{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Date) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ if src.InfinityModifier != Finite {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+}
+
+func (src Date) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch src.InfinityModifier {
+ case Finite:
+ s = src.Time.Format("2006-01-02")
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (dst *Date) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *dst = Date{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *dst = Date{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Date{Valid: true, InfinityModifier: -Infinity}
+ default:
+ t, err := time.ParseInLocation("2006-01-02", *s, time.UTC)
+ if err != nil {
+ return err
+ }
+
+ *dst = Date{Time: t, Valid: true}
+ }
+
+ return nil
+}
+
+type DateCodec struct{}
+
+func (DateCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (DateCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (DateCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(DateValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanDateCodecBinary{}
+ case TextFormatCode:
+ return encodePlanDateCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanDateCodecBinary struct{}
+
+func (encodePlanDateCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ date, err := value.(DateValuer).DateValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !date.Valid {
+ return nil, nil
+ }
+
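+	// The binary format is the number of days since the 2000-01-01 date epoch as
+	// an int32, with math.MinInt32 and math.MaxInt32 reserved as the -infinity
+	// and infinity sentinels.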
+ var daysSinceDateEpoch int32
+ switch date.InfinityModifier {
+ case Finite:
+ tUnix := time.Date(date.Time.Year(), date.Time.Month(), date.Time.Day(), 0, 0, 0, 0, time.UTC).Unix()
+ dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
+
+ secSinceDateEpoch := tUnix - dateEpoch
+ daysSinceDateEpoch = int32(secSinceDateEpoch / 86400)
+ case Infinity:
+ daysSinceDateEpoch = infinityDayOffset
+ case NegativeInfinity:
+ daysSinceDateEpoch = negativeInfinityDayOffset
+ }
+
+ return pgio.AppendInt32(buf, daysSinceDateEpoch), nil
+}
+
+type encodePlanDateCodecText struct{}
+
+func (encodePlanDateCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ date, err := value.(DateValuer).DateValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !date.Valid {
+ return nil, nil
+ }
+
+ switch date.InfinityModifier {
+ case Finite:
+ // Year 0000 is 1 BC
+ bc := false
+ year := date.Time.Year()
+ if year <= 0 {
+ year = -year + 1
+ bc = true
+ }
+
+ yearBytes := strconv.AppendInt(make([]byte, 0, 6), int64(year), 10)
+ for i := len(yearBytes); i < 4; i++ {
+ buf = append(buf, '0')
+ }
+ buf = append(buf, yearBytes...)
+ buf = append(buf, '-')
+ if date.Time.Month() < 10 {
+ buf = append(buf, '0')
+ }
+ buf = strconv.AppendInt(buf, int64(date.Time.Month()), 10)
+ buf = append(buf, '-')
+ if date.Time.Day() < 10 {
+ buf = append(buf, '0')
+ }
+ buf = strconv.AppendInt(buf, int64(date.Time.Day()), 10)
+
+ if bc {
+ buf = append(buf, " BC"...)
+ }
+ case Infinity:
+ buf = append(buf, "infinity"...)
+ case NegativeInfinity:
+ buf = append(buf, "-infinity"...)
+ }
+
+ return buf, nil
+}
+
+func (DateCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case DateScanner:
+ return scanPlanBinaryDateToDateScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case DateScanner:
+ return scanPlanTextAnyToDateScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryDateToDateScanner struct{}
+
+func (scanPlanBinaryDateToDateScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(DateScanner)
+
+ if src == nil {
+ return scanner.ScanDate(Date{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for date: %v", len(src))
+ }
+
+ dayOffset := int32(binary.BigEndian.Uint32(src))
+
+ switch dayOffset {
+ case infinityDayOffset:
+ return scanner.ScanDate(Date{InfinityModifier: Infinity, Valid: true})
+ case negativeInfinityDayOffset:
+ return scanner.ScanDate(Date{InfinityModifier: -Infinity, Valid: true})
+ default:
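+		// time.Date normalizes out-of-range day values, so offsetting the day
+		// from 2000-01-01 yields the correct calendar date.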
+ t := time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.UTC)
+ return scanner.ScanDate(Date{Time: t, Valid: true})
+ }
+}
+
+type scanPlanTextAnyToDateScanner struct{}
+
+var dateRegexp = regexp.MustCompile(`^(\d{4,})-(\d\d)-(\d\d)( BC)?$`)
+
+func (scanPlanTextAnyToDateScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(DateScanner)
+
+ if src == nil {
+ return scanner.ScanDate(Date{})
+ }
+
+ sbuf := string(src)
+ match := dateRegexp.FindStringSubmatch(sbuf)
+ if match != nil {
+ year, err := strconv.ParseInt(match[1], 10, 32)
+ if err != nil {
+ return fmt.Errorf("BUG: cannot parse date that regexp matched (year): %w", err)
+ }
+
+ month, err := strconv.ParseInt(match[2], 10, 32)
+ if err != nil {
+ return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %w", err)
+ }
+
+ day, err := strconv.ParseInt(match[3], 10, 32)
+ if err != nil {
+			return fmt.Errorf("BUG: cannot parse date that regexp matched (day): %w", err)
+ }
+
+ // BC matched
+ if len(match[4]) > 0 {
+ year = -year + 1
+ }
+
+ t := time.Date(int(year), time.Month(month), int(day), 0, 0, 0, 0, time.UTC)
+ return scanner.ScanDate(Date{Time: t, Valid: true})
+ }
+
+ switch sbuf {
+ case "infinity":
+ return scanner.ScanDate(Date{InfinityModifier: Infinity, Valid: true})
+ case "-infinity":
+ return scanner.ScanDate(Date{InfinityModifier: -Infinity, Valid: true})
+ default:
+ return fmt.Errorf("invalid date format")
+ }
+}
+
+func (c DateCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var date Date
+ err := codecScan(c, m, oid, format, src, &date)
+ if err != nil {
+ return nil, err
+ }
+
+ if date.InfinityModifier != Finite {
+ return date.InfinityModifier.String(), nil
+ }
+
+ return date.Time, nil
+}
+
+func (c DateCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var date Date
+ err := codecScan(c, m, oid, format, src, &date)
+ if err != nil {
+ return nil, err
+ }
+
+ if date.InfinityModifier != Finite {
+ return date.InfinityModifier, nil
+ }
+
+ return date.Time, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
new file mode 100644
index 0000000..d56c1dc
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
@@ -0,0 +1,188 @@
+// Package pgtype converts between Go and PostgreSQL values.
+/*
+The primary type is the Map type. It is a map of PostgreSQL types identified by OID (object ID) to a Codec. A Codec is
+responsible for converting between Go and PostgreSQL values. NewMap creates a Map with all supported standard PostgreSQL
+types already registered. Additional types can be registered with Map.RegisterType.
+
+Use Map.Scan and Map.Encode to decode PostgreSQL values to Go and encode Go values to PostgreSQL respectively.
+
+Base Type Mapping
+
+pgtype maps between all common base types directly between Go and PostgreSQL. In particular:
+
+ Go PostgreSQL
+ -----------------------
+ string varchar
+ text
+
+	// Integers are automatically converted to any other integer type if
+ // it can be done without overflow or underflow.
+ int8
+ int16 smallint
+ int32 int
+ int64 bigint
+ int
+ uint8
+ uint16
+ uint32
+ uint64
+ uint
+
+ // Floats are strict and do not automatically convert like integers.
+ float32 float4
+ float64 float8
+
+ time.Time date
+ timestamp
+ timestamptz
+
+ netip.Addr inet
+ netip.Prefix cidr
+
+ []byte bytea
+
+Null Values
+
+pgtype can map NULLs in two ways. The first is types that can directly represent NULL such as Int4. They work in a
+similar fashion to database/sql. The second is to use a pointer to a pointer.
+
+ var foo pgtype.Text
+ var bar *string
+ err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&foo, &bar)
+ if err != nil {
+ return err
+ }
+
+JSON Support
+
+pgtype automatically marshals and unmarshals data from json and jsonb PostgreSQL types.
+
+Extending Existing PostgreSQL Type Support
+
+Generally, all Codecs will support interfaces that can be implemented to enable scanning and encoding. For example,
+PointCodec can use any Go type that implements the PointScanner and PointValuer interfaces. So rather than use
+pgtype.Point, an application can directly use its own point type with pgtype as long as it implements those interfaces.
+
+See example_custom_type_test.go for an example of a custom type for the PostgreSQL point type.
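+
+For illustration, a minimal sketch of such a custom type might look like the following (the MyPoint name and layout are
+hypothetical, not part of pgtype):
+
+	type MyPoint struct {
+		X, Y  float64
+		Valid bool
+	}
+
+	// ScanPoint implements the PointScanner interface.
+	func (p *MyPoint) ScanPoint(v pgtype.Point) error {
+		*p = MyPoint{X: v.P.X, Y: v.P.Y, Valid: v.Valid}
+		return nil
+	}
+
+	// PointValue implements the PointValuer interface.
+	func (p MyPoint) PointValue() (pgtype.Point, error) {
+		return pgtype.Point{P: pgtype.Vec2{X: p.X, Y: p.Y}, Valid: p.Valid}, nil
+	}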
+
+Sometimes pgx supports a PostgreSQL type such as numeric but the Go type is in an external package that does not have
+pgx support such as github.com/shopspring/decimal. These types can be registered with pgtype with custom conversion
+logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for example
+integrations.
+
+New PostgreSQL Type Support
+
+pgtype uses the PostgreSQL OID to determine how to encode or decode a value. pgtype supports array, composite, domain,
+and enum types. However, any type created in PostgreSQL with CREATE TYPE will receive a new OID. This means that the OID
+of each new PostgreSQL type must be registered for pgtype to handle values of that type with the correct Codec.
+
+The pgx.Conn LoadType method can return a *Type for array, composite, domain, and enum types by inspecting the database
+metadata. This *Type can then be registered with Map.RegisterType.
+
+For example, the following function could be called after a connection is established:
+
+ func RegisterDataTypes(ctx context.Context, conn *pgx.Conn) error {
+ dataTypeNames := []string{
+ "foo",
+ "_foo",
+ "bar",
+ "_bar",
+ }
+
+ for _, typeName := range dataTypeNames {
+ dataType, err := conn.LoadType(ctx, typeName)
+ if err != nil {
+ return err
+ }
+ conn.TypeMap().RegisterType(dataType)
+ }
+
+ return nil
+ }
+
+A type cannot be registered unless all types it depends on are already registered. For example, an array type cannot
+be registered until its element type is registered.
+
+ArrayCodec implements support for arrays. If pgtype supports type T then it can easily support []T by registering an
+ArrayCodec for the appropriate PostgreSQL OID. In addition, the Array[T] type supports multi-dimensional arrays.
+
+CompositeCodec implements support for PostgreSQL composite types. Go structs can be scanned into if the public fields of
+the struct are in the exact order and type of the PostgreSQL type or by implementing CompositeIndexScanner and
+CompositeIndexGetter.
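+
+For example, given CREATE TYPE inventory_item AS (name text, price numeric), a struct such as the following (a
+hypothetical example, not part of pgtype) can be scanned into once the composite type is registered:
+
+	type InventoryItem struct {
+		Name  string
+		Price pgtype.Numeric
+	}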
+
+Domain types are treated as their underlying type if the underlying type and the domain type are registered.
+
+PostgreSQL enums can usually be treated as text. However, EnumCodec implements support for interning strings which can
+reduce memory usage.
+
+While pgtype will often still work with unregistered types it is highly recommended that all types be registered due to
+an improvement in performance and the elimination of certain edge cases.
+
+If an entirely new PostgreSQL type (e.g. PostGIS types) is used then the application or a library can create a new
+Codec. Then the OID / Codec mapping can be registered with Map.RegisterType. There is no difference between a Codec
+defined and registered by the application and a Codec built in to pgtype. See any of the Codecs in pgtype for Codec
+examples and for examples of type registration.
+
+Encoding Unknown Types
+
+pgtype works best when the OID of the PostgreSQL type is known. But in some cases such as using the simple protocol the
+OID is unknown. In this case Map.RegisterDefaultPgType can be used to register an assumed OID for a particular Go type.
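+
+For example, a hypothetical MyPoint type could be encoded as a PostgreSQL point even when the OID is unknown:
+
+	m := pgtype.NewMap()
+	m.RegisterDefaultPgType(MyPoint{}, "point")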
+
+Renamed Types
+
+If pgtype does not recognize a type and that type is a renamed simple type (e.g. type MyInt32 int32) pgtype acts as if
+it is the underlying type. It currently cannot automatically detect the underlying type of renamed structs (e.g.
+type MyTime time.Time).
+
+Compatibility with database/sql
+
+pgtype also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer
+interfaces.
+
+Encoding Typed Nils
+
+pgtype encodes untyped and typed nils (e.g. nil and []byte(nil)) to the SQL NULL value without going through the Codec
+system. This means that Codecs and other encoding logic do not have to handle nil or *T(nil).
+
+However, database/sql compatibility requires Value to be called on T(nil) when T implements driver.Valuer. Therefore,
+driver.Valuer values are only considered NULL for *T(nil) where driver.Valuer is implemented on T, not on *T. See
+https://github.com/golang/go/issues/8415 and
+https://github.com/golang/go/commit/0ce1d79a6a771f7449ec493b993ed2a720917870.
+
+Child Records
+
+pgtype's support for arrays and composite records can be used to load records and their children in a single query. See
+example_child_records_test.go for an example.
+
+Overview of Scanning Implementation
+
+The first step is to use the OID to lookup the correct Codec. If the OID is unavailable, Map will try to find the OID
+from previous calls of Map.RegisterDefaultPgType. The Map will call the Codec's PlanScan method to get a plan for
+scanning into the Go value. A Codec will support scanning into one or more Go types. Oftentimes these Go types are
+interfaces rather than explicit types. For example, PointCodec can use any Go type that implements the PointScanner and
+PointValuer interfaces.
+
+If a Go value is not supported directly by a Codec then Map will try wrapping it with additional logic and try again.
+For example, Int8Codec does not support scanning into a renamed type (e.g. type myInt64 int64). But Map will detect that
+myInt64 is a renamed type and create a plan that converts the value to the underlying int64 type and then passes that to
+the Codec (see TryFindUnderlyingTypeScanPlan).
+
+These plan wrappers are contained in Map.TryWrapScanPlanFuncs. By default these contain shared logic to handle renamed
+types, pointers to pointers, slices, composite types, etc. Additional plan wrappers can be added to seamlessly integrate
+types that do not support pgx directly. For example, the previously mentioned
+https://github.com/jackc/pgx-shopspring-decimal package detects decimal.Decimal values, wraps them in something
+implementing NumericScanner and passes that to the Codec.
+
+Map.Scan and Map.Encode are convenience methods that wrap Map.PlanScan and Map.PlanEncode. Determining how to scan or
+encode a particular type may be a time-consuming operation. Hence the planning and execution steps of a conversion are
+internally separated.
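+
+For example, a loop decoding many binary int8 values could plan once and execute the plan repeatedly (a sketch; the
+rawValues slice of []byte values is illustrative):
+
+	m := pgtype.NewMap()
+	var n int64
+	plan := m.PlanScan(pgtype.Int8OID, pgtype.BinaryFormatCode, &n)
+	for _, src := range rawValues {
+		if err := plan.Scan(src, &n); err != nil {
+			return err
+		}
+	}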
+
+Reducing Compiled Binary Size
+
+pgx.QueryExecModeExec and pgx.QueryExecModeSimpleProtocol require the default PostgreSQL type to be registered for each
+Go type used as a query parameter. By default pgx does this for all supported types and their array variants. If an
+application does not use those query execution modes or manually registers the default PostgreSQL type for the types it
+uses as query parameters it can use the build tag nopgxregisterdefaulttypes. This omits the default type registration
+and reduces the compiled binary size by ~2MB.
+*/
+package pgtype
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go
new file mode 100644
index 0000000..5e787c1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/enum_codec.go
@@ -0,0 +1,109 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// EnumCodec is a codec that caches the strings it decodes. If the same string is read multiple times only one copy is
+// allocated. These strings are only garbage collected when the EnumCodec is garbage collected. EnumCodec can be used
+// for any text type, not only enums, but it should only be used when there are a small number of possible values.
+type EnumCodec struct {
+ membersMap map[string]string // map to quickly lookup member and reuse string instead of allocating
+}
+
+func (EnumCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (EnumCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (EnumCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodePlanTextCodecString{}
+ case []byte:
+ return encodePlanTextCodecByteSlice{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+func (c *EnumCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return &scanPlanTextAnyToEnumString{codec: c}
+ case *[]byte:
+ return scanPlanAnyToNewByteSlice{}
+ case TextScanner:
+ return &scanPlanTextAnyToEnumTextScanner{codec: c}
+ }
+ }
+
+ return nil
+}
+
+func (c *EnumCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c *EnumCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ return c.lookupAndCacheString(src), nil
+}
+
+// lookupAndCacheString looks for src in the members map. If it is not found it is added to the map.
+func (c *EnumCodec) lookupAndCacheString(src []byte) string {
+ if c.membersMap == nil {
+ c.membersMap = make(map[string]string)
+ }
+
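+	// Note that the string(src) conversion in the map lookup below does not
+	// allocate; the compiler optimizes map lookups keyed by a converted []byte.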
+ if s, found := c.membersMap[string(src)]; found {
+ return s
+ }
+
+ s := string(src)
+ c.membersMap[s] = s
+ return s
+}
+
+type scanPlanTextAnyToEnumString struct {
+ codec *EnumCodec
+}
+
+func (plan *scanPlanTextAnyToEnumString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = plan.codec.lookupAndCacheString(src)
+
+ return nil
+}
+
+type scanPlanTextAnyToEnumTextScanner struct {
+ codec *EnumCodec
+}
+
+func (plan *scanPlanTextAnyToEnumTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: plan.codec.lookupAndCacheString(src), Valid: true})
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/float4.go b/vendor/github.com/jackc/pgx/v5/pgtype/float4.go
new file mode 100644
index 0000000..8646d9d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/float4.go
@@ -0,0 +1,319 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Float4 struct {
+ Float32 float32
+ Valid bool
+}
+
+// ScanFloat64 implements the Float64Scanner interface.
+func (f *Float4) ScanFloat64(n Float8) error {
+ *f = Float4{Float32: float32(n.Float64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float4) Float64Value() (Float8, error) {
+ return Float8{Float64: float64(f.Float32), Valid: f.Valid}, nil
+}
+
+func (f *Float4) ScanInt64(n Int8) error {
+ *f = Float4{Float32: float32(n.Int64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float4) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(f.Float32), Valid: f.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (f *Float4) Scan(src any) error {
+ if src == nil {
+ *f = Float4{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *f = Float4{Float32: float32(src), Valid: true}
+ return nil
+ case string:
+ n, err := strconv.ParseFloat(string(src), 32)
+ if err != nil {
+ return err
+ }
+ *f = Float4{Float32: float32(n), Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (f Float4) Value() (driver.Value, error) {
+ if !f.Valid {
+ return nil, nil
+ }
+ return float64(f.Float32), nil
+}
+
+func (f Float4) MarshalJSON() ([]byte, error) {
+ if !f.Valid {
+ return []byte("null"), nil
+ }
+ return json.Marshal(f.Float32)
+}
+
+func (f *Float4) UnmarshalJSON(b []byte) error {
+ var n *float32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *f = Float4{}
+ } else {
+ *f = Float4{Float32: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Float4Codec struct{}
+
+func (Float4Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Float4Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Float4Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case float32:
+ return encodePlanFloat4CodecBinaryFloat32{}
+ case Float64Valuer:
+ return encodePlanFloat4CodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanFloat4CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case float32:
+ return encodePlanTextFloat32{}
+ case Float64Valuer:
+ return encodePlanTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanFloat4CodecBinaryFloat32 struct{}
+
+func (encodePlanFloat4CodecBinaryFloat32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float32)
+ return pgio.AppendUint32(buf, math.Float32bits(n)), nil
+}
+
+type encodePlanTextFloat32 struct{}
+
+func (encodePlanTextFloat32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float32)
+ return append(buf, strconv.FormatFloat(float64(n), 'f', -1, 32)...), nil
+}
+
+type encodePlanFloat4CodecBinaryFloat64Valuer struct{}
+
+func (encodePlanFloat4CodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint32(buf, math.Float32bits(float32(n.Float64))), nil
+}
+
+type encodePlanFloat4CodecBinaryInt64Valuer struct{}
+
+func (encodePlanFloat4CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ f := float32(n.Int64)
+ return pgio.AppendUint32(buf, math.Float32bits(f)), nil
+}
+
+func (Float4Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *float32:
+ return scanPlanBinaryFloat4ToFloat32{}
+ case Float64Scanner:
+ return scanPlanBinaryFloat4ToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryFloat4ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryFloat4ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *float32:
+ return scanPlanTextAnyToFloat32{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryFloat4ToFloat32 struct{}
+
+func (scanPlanBinaryFloat4ToFloat32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ f := (dst).(*float32)
+ *f = math.Float32frombits(uint32(n))
+
+ return nil
+}
+
+type scanPlanBinaryFloat4ToFloat64Scanner struct{}
+
+func (scanPlanBinaryFloat4ToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ return s.ScanFloat64(Float8{Float64: float64(math.Float32frombits(uint32(n))), Valid: true})
+}
+
+type scanPlanBinaryFloat4ToInt64Scanner struct{}
+
+func (scanPlanBinaryFloat4ToInt64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Int64Scanner)
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ ui32 := int32(binary.BigEndian.Uint32(src))
+ f32 := math.Float32frombits(uint32(ui32))
+ i64 := int64(f32)
+ if f32 != float32(i64) {
+ return fmt.Errorf("cannot losslessly convert %v to int64", f32)
+ }
+
+ return s.ScanInt64(Int8{Int64: i64, Valid: true})
+}
+
+type scanPlanBinaryFloat4ToTextScanner struct{}
+
+func (scanPlanBinaryFloat4ToTextScanner) Scan(src []byte, dst any) error {
+ s := (dst).(TextScanner)
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ ui32 := int32(binary.BigEndian.Uint32(src))
+ f32 := math.Float32frombits(uint32(ui32))
+
+ return s.ScanText(Text{String: strconv.FormatFloat(float64(f32), 'f', -1, 32), Valid: true})
+}
+
+type scanPlanTextAnyToFloat32 struct{}
+
+func (scanPlanTextAnyToFloat32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ n, err := strconv.ParseFloat(string(src), 32)
+ if err != nil {
+ return err
+ }
+
+ f := (dst).(*float32)
+ *f = float32(n)
+
+ return nil
+}
+
+func (c Float4Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return float64(n), nil
+}
+
+func (c Float4Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/float8.go b/vendor/github.com/jackc/pgx/v5/pgtype/float8.go
new file mode 100644
index 0000000..9c923c9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/float8.go
@@ -0,0 +1,365 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Float64Scanner interface {
+ ScanFloat64(Float8) error
+}
+
+type Float64Valuer interface {
+ Float64Value() (Float8, error)
+}
+
+type Float8 struct {
+ Float64 float64
+ Valid bool
+}
+
+// ScanFloat64 implements the Float64Scanner interface.
+func (f *Float8) ScanFloat64(n Float8) error {
+ *f = n
+ return nil
+}
+
+func (f Float8) Float64Value() (Float8, error) {
+ return f, nil
+}
+
+func (f *Float8) ScanInt64(n Int8) error {
+ *f = Float8{Float64: float64(n.Int64), Valid: n.Valid}
+ return nil
+}
+
+func (f Float8) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(f.Float64), Valid: f.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (f *Float8) Scan(src any) error {
+ if src == nil {
+ *f = Float8{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *f = Float8{Float64: src, Valid: true}
+ return nil
+ case string:
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+ *f = Float8{Float64: n, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (f Float8) Value() (driver.Value, error) {
+ if !f.Valid {
+ return nil, nil
+ }
+ return f.Float64, nil
+}
+
+func (f Float8) MarshalJSON() ([]byte, error) {
+ if !f.Valid {
+ return []byte("null"), nil
+ }
+ return json.Marshal(f.Float64)
+}
+
+func (f *Float8) UnmarshalJSON(b []byte) error {
+ var n *float64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *f = Float8{}
+ } else {
+ *f = Float8{Float64: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Float8Codec struct{}
+
+func (Float8Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Float8Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Float8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case float64:
+ return encodePlanFloat8CodecBinaryFloat64{}
+ case Float64Valuer:
+ return encodePlanFloat8CodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanFloat8CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case float64:
+ return encodePlanTextFloat64{}
+ case Float64Valuer:
+ return encodePlanTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanFloat8CodecBinaryFloat64 struct{}
+
+func (encodePlanFloat8CodecBinaryFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float64)
+ return pgio.AppendUint64(buf, math.Float64bits(n)), nil
+}
+
+type encodePlanTextFloat64 struct{}
+
+func (encodePlanTextFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(float64)
+ return append(buf, strconv.FormatFloat(n, 'f', -1, 64)...), nil
+}
+
+type encodePlanFloat8CodecBinaryFloat64Valuer struct{}
+
+func (encodePlanFloat8CodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint64(buf, math.Float64bits(n.Float64)), nil
+}
+
+type encodePlanTextFloat64Valuer struct{}
+
+func (encodePlanTextFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatFloat(n.Float64, 'f', -1, 64)...), nil
+}
+
+type encodePlanFloat8CodecBinaryInt64Valuer struct{}
+
+func (encodePlanFloat8CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ f := float64(n.Int64)
+ return pgio.AppendUint64(buf, math.Float64bits(f)), nil
+}
+
+type encodePlanTextInt64Valuer struct{}
+
+func (encodePlanTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Float8Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *float64:
+ return scanPlanBinaryFloat8ToFloat64{}
+ case Float64Scanner:
+ return scanPlanBinaryFloat8ToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryFloat8ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryFloat8ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *float64:
+ return scanPlanTextAnyToFloat64{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryFloat8ToFloat64 struct{}
+
+func (scanPlanBinaryFloat8ToFloat64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ f := (dst).(*float64)
+ *f = math.Float64frombits(uint64(n))
+
+ return nil
+}
+
+type scanPlanBinaryFloat8ToFloat64Scanner struct{}
+
+func (scanPlanBinaryFloat8ToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ return s.ScanFloat64(Float8{Float64: math.Float64frombits(uint64(n)), Valid: true})
+}
+
+type scanPlanBinaryFloat8ToInt64Scanner struct{}
+
+func (scanPlanBinaryFloat8ToInt64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Int64Scanner)
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ ui64 := int64(binary.BigEndian.Uint64(src))
+ f64 := math.Float64frombits(uint64(ui64))
+ i64 := int64(f64)
+ if f64 != float64(i64) {
+ return fmt.Errorf("cannot losslessly convert %v to int64", f64)
+ }
+
+ return s.ScanInt64(Int8{Int64: i64, Valid: true})
+}
+
+type scanPlanBinaryFloat8ToTextScanner struct{}
+
+func (scanPlanBinaryFloat8ToTextScanner) Scan(src []byte, dst any) error {
+ s := (dst).(TextScanner)
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for float8: %v", len(src))
+ }
+
+ ui64 := int64(binary.BigEndian.Uint64(src))
+ f64 := math.Float64frombits(uint64(ui64))
+
+ return s.ScanText(Text{String: strconv.FormatFloat(f64, 'f', -1, 64), Valid: true})
+}
+
+type scanPlanTextAnyToFloat64 struct{}
+
+func (scanPlanTextAnyToFloat64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ f := (dst).(*float64)
+ *f = n
+
+ return nil
+}
+
+type scanPlanTextAnyToFloat64Scanner struct{}
+
+func (scanPlanTextAnyToFloat64Scanner) Scan(src []byte, dst any) error {
+ s := (dst).(Float64Scanner)
+
+ if src == nil {
+ return s.ScanFloat64(Float8{})
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanFloat64(Float8{Float64: n, Valid: true})
+}
+
+func (c Float8Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c Float8Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n float64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go b/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go
new file mode 100644
index 0000000..2f34f4c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/hstore.go
@@ -0,0 +1,486 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type HstoreScanner interface {
+ ScanHstore(v Hstore) error
+}
+
+type HstoreValuer interface {
+ HstoreValue() (Hstore, error)
+}
+
+// Hstore represents an hstore column that can be null or have null values
+// associated with its keys.
+type Hstore map[string]*string
+
+func (h *Hstore) ScanHstore(v Hstore) error {
+ *h = v
+ return nil
+}
+
+func (h Hstore) HstoreValue() (Hstore, error) {
+ return h, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (h *Hstore) Scan(src any) error {
+ if src == nil {
+ *h = nil
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToHstoreScanner{}.scanString(src, h)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (h Hstore) Value() (driver.Value, error) {
+ if h == nil {
+ return nil, nil
+ }
+
+ buf, err := HstoreCodec{}.PlanEncode(nil, 0, TextFormatCode, h).Encode(h, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type HstoreCodec struct{}
+
+func (HstoreCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (HstoreCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (HstoreCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(HstoreValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanHstoreCodecBinary{}
+ case TextFormatCode:
+ return encodePlanHstoreCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanHstoreCodecBinary struct{}
+
+func (encodePlanHstoreCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ hstore, err := value.(HstoreValuer).HstoreValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if hstore == nil {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(hstore)))
+
+ for k, v := range hstore {
+ buf = pgio.AppendInt32(buf, int32(len(k)))
+ buf = append(buf, k...)
+
+ if v == nil {
+ buf = pgio.AppendInt32(buf, -1)
+ } else {
+ buf = pgio.AppendInt32(buf, int32(len(*v)))
+ buf = append(buf, (*v)...)
+ }
+ }
+
+ return buf, nil
+}
+
+type encodePlanHstoreCodecText struct{}
+
+func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ hstore, err := value.(HstoreValuer).HstoreValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(hstore) == 0 {
+ // distinguish between empty and nil: Not strictly required by Postgres, since its protocol
+ // explicitly marks NULL column values separately. However, the Binary codec does this, and
+ // this means we can "round trip" Encode and Scan without data loss.
+ // nil: []byte(nil); empty: []byte{}
+ if hstore == nil {
+ return nil, nil
+ }
+ return []byte{}, nil
+ }
+
+ firstPair := true
+
+ for k, v := range hstore {
+ if firstPair {
+ firstPair = false
+ } else {
+ buf = append(buf, ',', ' ')
+ }
+
+ // unconditionally quote hstore keys/values like Postgres does
+ // this avoids a Mac OS X Postgres hstore parsing bug:
+ // https://www.postgresql.org/message-id/CA%2BHWA9awUW0%2BRV_gO9r1ABZwGoZxPztcJxPy8vMFSTbTfi4jig%40mail.gmail.com
+ buf = append(buf, '"')
+ buf = append(buf, quoteArrayReplacer.Replace(k)...)
+ buf = append(buf, '"')
+ buf = append(buf, "=>"...)
+
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, '"')
+ buf = append(buf, quoteArrayReplacer.Replace(*v)...)
+ buf = append(buf, '"')
+ }
+ }
+
+ return buf, nil
+}
+
+func (HstoreCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case HstoreScanner:
+ return scanPlanBinaryHstoreToHstoreScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case HstoreScanner:
+ return scanPlanTextAnyToHstoreScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryHstoreToHstoreScanner struct{}
+
+func (scanPlanBinaryHstoreToHstoreScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(HstoreScanner)
+
+ if src == nil {
+ return scanner.ScanHstore(Hstore(nil))
+ }
+
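+	// The binary format is an int32 pair count followed by each pair as an int32
+	// key length, the key bytes, an int32 value length (-1 denotes NULL), and
+	// the value bytes.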
+ rp := 0
+
+ const uint32Len = 4
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ pairCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += uint32Len
+
+ hstore := make(Hstore, pairCount)
+ // one allocation for all *string, rather than one per string, just like text parsing
+ valueStrings := make([]string, pairCount)
+
+ for i := 0; i < pairCount; i++ {
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ keyLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += uint32Len
+
+ if len(src[rp:]) < keyLen {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ key := string(src[rp : rp+keyLen])
+ rp += keyLen
+
+ if len(src[rp:]) < uint32Len {
+ return fmt.Errorf("hstore incomplete %v", src)
+ }
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += uint32Len
+
+ if valueLen >= 0 {
+ valueStrings[i] = string(src[rp : rp+valueLen])
+ rp += valueLen
+
+ hstore[key] = &valueStrings[i]
+ } else {
+ hstore[key] = nil
+ }
+ }
+
+ return scanner.ScanHstore(hstore)
+}
+
+type scanPlanTextAnyToHstoreScanner struct{}
+
+func (s scanPlanTextAnyToHstoreScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(HstoreScanner)
+
+ if src == nil {
+ return scanner.ScanHstore(Hstore(nil))
+ }
+ return s.scanString(string(src), scanner)
+}
+
+// scanString never produces a nil Hstore because its src argument is a string, which cannot be nil.
+func (scanPlanTextAnyToHstoreScanner) scanString(src string, scanner HstoreScanner) error {
+ hstore, err := parseHstore(src)
+ if err != nil {
+ return err
+ }
+ return scanner.ScanHstore(hstore)
+}
+
+func (c HstoreCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c HstoreCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var hstore Hstore
+ err := codecScan(c, m, oid, format, src, &hstore)
+ if err != nil {
+ return nil, err
+ }
+ return hstore, nil
+}
+
+type hstoreParser struct {
+ str string
+ pos int
+ nextBackslash int
+}
+
+func newHSP(in string) *hstoreParser {
+ return &hstoreParser{
+ pos: 0,
+ str: in,
+ nextBackslash: strings.IndexByte(in, '\\'),
+ }
+}
+
+func (p *hstoreParser) atEnd() bool {
+ return p.pos >= len(p.str)
+}
+
+// consume returns the next byte of the string, or end if the string is done.
+func (p *hstoreParser) consume() (b byte, end bool) {
+ if p.pos >= len(p.str) {
+ return 0, true
+ }
+ b = p.str[p.pos]
+ p.pos++
+ return b, false
+}
+
+func unexpectedByteErr(actualB byte, expectedB byte) error {
+ return fmt.Errorf("expected '%c' ('%#v'); found '%c' ('%#v')", expectedB, expectedB, actualB, actualB)
+}
+
+// consumeExpectedByte consumes expectedB from the string, or returns an error.
+func (p *hstoreParser) consumeExpectedByte(expectedB byte) error {
+ nextB, end := p.consume()
+ if end {
+ return fmt.Errorf("expected '%c' ('%#v'); found end", expectedB, expectedB)
+ }
+ if nextB != expectedB {
+ return unexpectedByteErr(nextB, expectedB)
+ }
+ return nil
+}
+
+// consumeExpected2 consumes two expected bytes or returns an error.
+// This was a bit faster than using a string argument (better inlining? Not sure).
+func (p *hstoreParser) consumeExpected2(one byte, two byte) error {
+ if p.pos+2 > len(p.str) {
+ return errors.New("unexpected end of string")
+ }
+ if p.str[p.pos] != one {
+ return unexpectedByteErr(p.str[p.pos], one)
+ }
+ if p.str[p.pos+1] != two {
+ return unexpectedByteErr(p.str[p.pos+1], two)
+ }
+ p.pos += 2
+ return nil
+}
+
+var errEOSInQuoted = errors.New(`found end before closing double-quote ('"')`)
+
+// consumeDoubleQuoted consumes a double-quoted string from p. The double quote must have been
+// parsed already. This copies the string from the backing string so it can be garbage collected.
+func (p *hstoreParser) consumeDoubleQuoted() (string, error) {
+ // fast path: assume most keys/values do not contain escapes
+ nextDoubleQuote := strings.IndexByte(p.str[p.pos:], '"')
+ if nextDoubleQuote == -1 {
+ return "", errEOSInQuoted
+ }
+ nextDoubleQuote += p.pos
+ if p.nextBackslash == -1 || p.nextBackslash > nextDoubleQuote {
+ // clone the string from the source string to ensure it can be garbage collected separately
+ s := strings.Clone(p.str[p.pos:nextDoubleQuote])
+ p.pos = nextDoubleQuote + 1
+ return s, nil
+ }
+
+ // slow path: string contains escapes
+ s, err := p.consumeDoubleQuotedWithEscapes(p.nextBackslash)
+ p.nextBackslash = strings.IndexByte(p.str[p.pos:], '\\')
+ if p.nextBackslash != -1 {
+ p.nextBackslash += p.pos
+ }
+ return s, err
+}
+
+// consumeDoubleQuotedWithEscapes consumes a double-quoted string containing escapes, starting
+// at p.pos, and with the first backslash at firstBackslash. This copies the string so it can be
+// garbage collected separately.
+func (p *hstoreParser) consumeDoubleQuotedWithEscapes(firstBackslash int) (string, error) {
+ // copy the prefix that does not contain backslashes
+ var builder strings.Builder
+ builder.WriteString(p.str[p.pos:firstBackslash])
+
+ // skip to the backslash
+ p.pos = firstBackslash
+
+ // copy bytes until the end, unescaping backslashes
+ for {
+ nextB, end := p.consume()
+ if end {
+ return "", errEOSInQuoted
+ } else if nextB == '"' {
+ break
+ } else if nextB == '\\' {
+ // escape: skip the backslash and copy the char
+ nextB, end = p.consume()
+ if end {
+ return "", errEOSInQuoted
+ }
+ if !(nextB == '\\' || nextB == '"') {
+ return "", fmt.Errorf("unexpected escape in quoted string: found '%#v'", nextB)
+ }
+ builder.WriteByte(nextB)
+ } else {
+ // normal byte: copy it
+ builder.WriteByte(nextB)
+ }
+ }
+ return builder.String(), nil
+}
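+
+// Only the two escapes Postgres itself emits, \\ and \", are accepted above;
+// any other backslash sequence is rejected as malformed input.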
+
+// consumePairSeparator consumes the Hstore pair separator ", " or returns an error.
+func (p *hstoreParser) consumePairSeparator() error {
+ return p.consumeExpected2(',', ' ')
+}
+
+// consumeKVSeparator consumes the Hstore key/value separator "=>" or returns an error.
+func (p *hstoreParser) consumeKVSeparator() error {
+ return p.consumeExpected2('=', '>')
+}
+
+// consumeDoubleQuotedOrNull consumes a double-quoted Hstore value or the literal NULL, or returns an error.
+func (p *hstoreParser) consumeDoubleQuotedOrNull() (Text, error) {
+ // peek at the next byte
+ if p.atEnd() {
+ return Text{}, errors.New("found end instead of value")
+ }
+ next := p.str[p.pos]
+ if next == 'N' {
+ // must be the exact string NULL: use consumeExpected2 twice
+ err := p.consumeExpected2('N', 'U')
+ if err != nil {
+ return Text{}, err
+ }
+ err = p.consumeExpected2('L', 'L')
+ if err != nil {
+ return Text{}, err
+ }
+ return Text{String: "", Valid: false}, nil
+ } else if next != '"' {
+ return Text{}, unexpectedByteErr(next, '"')
+ }
+
+ // skip the double quote
+ p.pos += 1
+ s, err := p.consumeDoubleQuoted()
+ if err != nil {
+ return Text{}, err
+ }
+ return Text{String: s, Valid: true}, nil
+}
+
+func parseHstore(s string) (Hstore, error) {
+ p := newHSP(s)
+
+ // This is an over-estimate of the number of key/value pairs. Use '>' because I am guessing it
+ // is less likely to occur in keys/values than '=' or ','.
+ numPairsEstimate := strings.Count(s, ">")
+ // makes one allocation of strings for the entire Hstore, rather than one allocation per value.
+ valueStrings := make([]string, 0, numPairsEstimate)
+ result := make(Hstore, numPairsEstimate)
+ first := true
+ for !p.atEnd() {
+ if !first {
+ err := p.consumePairSeparator()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ first = false
+ }
+
+ err := p.consumeExpectedByte('"')
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := p.consumeDoubleQuoted()
+ if err != nil {
+ return nil, err
+ }
+
+ err = p.consumeKVSeparator()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := p.consumeDoubleQuotedOrNull()
+ if err != nil {
+ return nil, err
+ }
+ if value.Valid {
+ valueStrings = append(valueStrings, value.String)
+ result[key] = &valueStrings[len(valueStrings)-1]
+ } else {
+ result[key] = nil
+ }
+ }
+
+ return result, nil
+}
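+
+// exampleParseHstore is an illustrative sketch, not part of upstream pgx: it
+// round-trips the text form back into a map, restoring NULL as a nil *string.
+func exampleParseHstore() (Hstore, error) {
+	return parseHstore(`"a"=>"1", "b"=>NULL`) // map with "a" -> "1", "b" -> nil
+}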
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/inet.go b/vendor/github.com/jackc/pgx/v5/pgtype/inet.go
new file mode 100644
index 0000000..6ca10ea
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/inet.go
@@ -0,0 +1,200 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "net/netip"
+)
+
+// Network address family is dependent on server socket.h value for AF_INET.
+// In practice, all platforms appear to have the same value. See
+// src/include/utils/inet.h for more information.
+const (
+ defaultAFInet = 2
+ defaultAFInet6 = 3
+)
+
+type NetipPrefixScanner interface {
+ ScanNetipPrefix(v netip.Prefix) error
+}
+
+type NetipPrefixValuer interface {
+ NetipPrefixValue() (netip.Prefix, error)
+}
+
+// InetCodec handles both inet and cidr PostgreSQL types. The preferred Go types are netip.Prefix and netip.Addr. If
+// IsValid() is false then they are treated as SQL NULL.
+type InetCodec struct{}
+
+func (InetCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (InetCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (InetCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(NetipPrefixValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanInetCodecBinary{}
+ case TextFormatCode:
+ return encodePlanInetCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanInetCodecBinary struct{}
+
+func (encodePlanInetCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ prefix, err := value.(NetipPrefixValuer).NetipPrefixValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ var family byte
+ if prefix.Addr().Is4() {
+ family = defaultAFInet
+ } else {
+ family = defaultAFInet6
+ }
+
+ buf = append(buf, family)
+
+ ones := prefix.Bits()
+ buf = append(buf, byte(ones))
+
+ // is_cidr is ignored on server
+ buf = append(buf, 0)
+
+ if family == defaultAFInet {
+ buf = append(buf, byte(4))
+ b := prefix.Addr().As4()
+ buf = append(buf, b[:]...)
+ } else {
+ buf = append(buf, byte(16))
+ b := prefix.Addr().As16()
+ buf = append(buf, b[:]...)
+ }
+
+ return buf, nil
+}
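+
+// The wire image built above for, e.g., 192.0.2.0/24 is four header bytes
+// plus the address: family (2 for IPv4), prefix bits (24), the unused is_cidr
+// flag (0), the address length (4), and then the four address bytes.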
+
+type encodePlanInetCodecText struct{}
+
+func (encodePlanInetCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ prefix, err := value.(NetipPrefixValuer).NetipPrefixValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ return append(buf, prefix.String()...), nil
+}
+
+func (InetCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case NetipPrefixScanner:
+ return scanPlanBinaryInetToNetipPrefixScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case NetipPrefixScanner:
+ return scanPlanTextAnyToNetipPrefixScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c InetCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c InetCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var prefix netip.Prefix
+ err := codecScan(c, m, oid, format, src, (*netipPrefixWrapper)(&prefix))
+ if err != nil {
+ return nil, err
+ }
+
+ if !prefix.IsValid() {
+ return nil, nil
+ }
+
+ return prefix, nil
+}
+
+type scanPlanBinaryInetToNetipPrefixScanner struct{}
+
+func (scanPlanBinaryInetToNetipPrefixScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NetipPrefixScanner)
+
+ if src == nil {
+ return scanner.ScanNetipPrefix(netip.Prefix{})
+ }
+
+ if len(src) != 8 && len(src) != 20 {
+ return fmt.Errorf("Received an invalid size for an inet: %d", len(src))
+ }
+
+ // ignore family
+ bits := src[1]
+ // ignore is_cidr
+ // ignore addressLength - implicit in length of message
+
+ addr, ok := netip.AddrFromSlice(src[4:])
+ if !ok {
+ return errors.New("netip.AddrFromSlice failed")
+ }
+
+ return scanner.ScanNetipPrefix(netip.PrefixFrom(addr, int(bits)))
+}
+
+type scanPlanTextAnyToNetipPrefixScanner struct{}
+
+func (scanPlanTextAnyToNetipPrefixScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NetipPrefixScanner)
+
+ if src == nil {
+ return scanner.ScanNetipPrefix(netip.Prefix{})
+ }
+
+ var prefix netip.Prefix
+ if bytes.IndexByte(src, '/') == -1 {
+ addr, err := netip.ParseAddr(string(src))
+ if err != nil {
+ return err
+ }
+ prefix = netip.PrefixFrom(addr, addr.BitLen())
+ } else {
+ var err error
+ prefix, err = netip.ParsePrefix(string(src))
+ if err != nil {
+ return err
+ }
+ }
+
+ return scanner.ScanNetipPrefix(prefix)
+}
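+
+// A bare address such as "192.0.2.1" (no "/") is scanned above as a
+// full-length prefix, i.e. 192.0.2.1/32, while "192.0.2.0/24" parses as-is.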
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int.go b/vendor/github.com/jackc/pgx/v5/pgtype/int.go
new file mode 100644
index 0000000..90a20a2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int.go
@@ -0,0 +1,1980 @@
+// Do not edit. Generated from pgtype/int.go.erb
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Int64Scanner interface {
+ ScanInt64(Int8) error
+}
+
+type Int64Valuer interface {
+ Int64Value() (Int8, error)
+}
+
+type Int2 struct {
+ Int16 int16
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int2) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int2{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for Int2", n.Int64)
+ }
+ if n.Int64 > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n.Int64)
+ }
+ *dst = Int2{Int16: int16(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int2) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int16), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int2) Scan(src any) error {
+ if src == nil {
+ *dst = Int2{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 16)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n)
+ }
+ if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for Int2", n)
+ }
+ *dst = Int2{Int16: int16(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int2) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int16), nil
+}
+
+func (src Int2) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int16), 10)), nil
+}
+
+func (dst *Int2) UnmarshalJSON(b []byte) error {
+ var n *int16
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int2{}
+ } else {
+ *dst = Int2{Int16: *n, Valid: true}
+ }
+
+ return nil
+}
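+
+// exampleInt2Null is an illustrative sketch, not part of upstream pgx: Int2
+// models SQL NULL as Valid=false, so a NULL scanned via database/sql marshals
+// back out as JSON null.
+func exampleInt2Null() ([]byte, error) {
+	var n Int2
+	if err := n.Scan(nil); err != nil { // NULL from database/sql
+		return nil, err
+	}
+	return n.MarshalJSON() // []byte("null")
+}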
+
+type Int2Codec struct{}
+
+func (Int2Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int2Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int2Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int16:
+ return encodePlanInt2CodecBinaryInt16{}
+ case Int64Valuer:
+ return encodePlanInt2CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int16:
+ return encodePlanInt2CodecTextInt16{}
+ case Int64Valuer:
+ return encodePlanInt2CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt2CodecBinaryInt16 struct{}
+
+func (encodePlanInt2CodecBinaryInt16) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int16)
+ return pgio.AppendInt16(buf, int16(n)), nil
+}
+
+type encodePlanInt2CodecTextInt16 struct{}
+
+func (encodePlanInt2CodecTextInt16) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int16)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt2CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt2CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt16 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int2", n.Int64)
+ }
+ if n.Int64 < math.MinInt16 {
+ return nil, fmt.Errorf("%d is less than minimum value for int2", n.Int64)
+ }
+
+ return pgio.AppendInt16(buf, int16(n.Int64)), nil
+}
+
+type encodePlanInt2CodecTextInt64Valuer struct{}
+
+func (encodePlanInt2CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt16 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int2", n.Int64)
+ }
+ if n.Int64 < math.MinInt16 {
+ return nil, fmt.Errorf("%d is less than minimum value for int2", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int2Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt2ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt2ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt2ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt2ToInt64{}
+ case *int:
+ return scanPlanBinaryInt2ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt2ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt2ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt2ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt2ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt2ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt2ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt2ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int2Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int2Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int16
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
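+
+// Note the asymmetry above: database/sql driver values are always int64, so
+// DecodeDatabaseSQLValue widens to int64, while DecodeValue returns the
+// native int16.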
+
+type scanPlanBinaryInt2ToInt8 struct{}
+
+func (scanPlanBinaryInt2ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint8 struct{}
+
+func (scanPlanBinaryInt2ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt16 struct{}
+
+func (scanPlanBinaryInt2ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int16(binary.BigEndian.Uint16(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint16 struct{}
+
+func (scanPlanBinaryInt2ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt32 struct{}
+
+func (scanPlanBinaryInt2ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int32(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint32 struct{}
+
+func (scanPlanBinaryInt2ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt64 struct{}
+
+func (scanPlanBinaryInt2ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint64 struct{}
+
+func (scanPlanBinaryInt2ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int16(binary.BigEndian.Uint16(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt struct{}
+
+func (scanPlanBinaryInt2ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int(int16(binary.BigEndian.Uint16(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToUint struct{}
+
+func (scanPlanBinaryInt2ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for uint2: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt2ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt2ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt2ToTextScanner struct{}
+
+func (scanPlanBinaryInt2ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 2 {
+ return fmt.Errorf("invalid length for int2: %v", len(src))
+ }
+
+ n := int64(int16(binary.BigEndian.Uint16(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type Int4 struct {
+ Int32 int32
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int4) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int4{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for Int4", n.Int64)
+ }
+ if n.Int64 > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n.Int64)
+ }
+ *dst = Int4{Int32: int32(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int4) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int32), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4) Scan(src any) error {
+ if src == nil {
+ *dst = Int4{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 32)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n)
+ }
+ if n > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for Int4", n)
+ }
+ *dst = Int4{Int32: int32(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int4) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int32), nil
+}
+
+func (src Int4) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int32), 10)), nil
+}
+
+func (dst *Int4) UnmarshalJSON(b []byte) error {
+ var n *int32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int4{}
+ } else {
+ *dst = Int4{Int32: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int4Codec struct{}
+
+func (Int4Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int4Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int4Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int32:
+ return encodePlanInt4CodecBinaryInt32{}
+ case Int64Valuer:
+ return encodePlanInt4CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int32:
+ return encodePlanInt4CodecTextInt32{}
+ case Int64Valuer:
+ return encodePlanInt4CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt4CodecBinaryInt32 struct{}
+
+func (encodePlanInt4CodecBinaryInt32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int32)
+ return pgio.AppendInt32(buf, int32(n)), nil
+}
+
+type encodePlanInt4CodecTextInt32 struct{}
+
+func (encodePlanInt4CodecTextInt32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int32)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt4CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt4CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int4", n.Int64)
+ }
+ if n.Int64 < math.MinInt32 {
+ return nil, fmt.Errorf("%d is less than minimum value for int4", n.Int64)
+ }
+
+ return pgio.AppendInt32(buf, int32(n.Int64)), nil
+}
+
+type encodePlanInt4CodecTextInt64Valuer struct{}
+
+func (encodePlanInt4CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt32 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int4", n.Int64)
+ }
+ if n.Int64 < math.MinInt32 {
+ return nil, fmt.Errorf("%d is less than minimum value for int4", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int4Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt4ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt4ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt4ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt4ToInt64{}
+ case *int:
+ return scanPlanBinaryInt4ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt4ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt4ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt4ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt4ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt4ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt4ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt4ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int4Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int4Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryInt4ToInt8 struct{}
+
+func (scanPlanBinaryInt4ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint8 struct{}
+
+func (scanPlanBinaryInt4ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt16 struct{}
+
+func (scanPlanBinaryInt4ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", n)
+ } else if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", n)
+ }
+
+ *p = int16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint16 struct{}
+
+func (scanPlanBinaryInt4ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ if n > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt32 struct{}
+
+func (scanPlanBinaryInt4ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int32(binary.BigEndian.Uint32(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint32 struct{}
+
+func (scanPlanBinaryInt4ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt64 struct{}
+
+func (scanPlanBinaryInt4ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(int32(binary.BigEndian.Uint32(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint64 struct{}
+
+func (scanPlanBinaryInt4ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt struct{}
+
+func (scanPlanBinaryInt4ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int(int32(binary.BigEndian.Uint32(src)))
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToUint struct{}
+
+func (scanPlanBinaryInt4ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint4: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt4ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt4ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt4ToTextScanner struct{}
+
+func (scanPlanBinaryInt4ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int64(int32(binary.BigEndian.Uint32(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type Int8 struct {
+ Int64 int64
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int8) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int8{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt64 {
+ return fmt.Errorf("%d is less than minimum value for Int8", n.Int64)
+ }
+ if n.Int64 > math.MaxInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n.Int64)
+ }
+ *dst = Int8{Int64: int64(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int8) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int64), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8) Scan(src any) error {
+ if src == nil {
+ *dst = Int8{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, 64)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n)
+ }
+ if n > math.MaxInt64 {
+ return fmt.Errorf("%d is greater than maximum value for Int8", n)
+ }
+ *dst = Int8{Int64: int64(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int8) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int64), nil
+}
+
+func (src Int8) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int64), 10)), nil
+}
+
+func (dst *Int8) UnmarshalJSON(b []byte) error {
+ var n *int64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int8{}
+ } else {
+ *dst = Int8{Int64: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int8Codec struct{}
+
+func (Int8Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int8Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int64:
+ return encodePlanInt8CodecBinaryInt64{}
+ case Int64Valuer:
+ return encodePlanInt8CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int64:
+ return encodePlanInt8CodecTextInt64{}
+ case Int64Valuer:
+ return encodePlanInt8CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt8CodecBinaryInt64 struct{}
+
+func (encodePlanInt8CodecBinaryInt64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int64)
+ return pgio.AppendInt64(buf, int64(n)), nil
+}
+
+type encodePlanInt8CodecTextInt64 struct{}
+
+func (encodePlanInt8CodecTextInt64) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int64)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt8CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt8CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt64 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
+ }
+ if n.Int64 < math.MinInt64 {
+ return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
+ }
+
+ return pgio.AppendInt64(buf, int64(n.Int64)), nil
+}
+
+type encodePlanInt8CodecTextInt64Valuer struct{}
+
+func (encodePlanInt8CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt64 {
+ return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
+ }
+ if n.Int64 < math.MinInt64 {
+ return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int8Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt8ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt8ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt8ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt8ToInt64{}
+ case *int:
+ return scanPlanBinaryInt8ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt8ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt8ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt8ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt8ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt8ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt8ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt8ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int8Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int8Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryInt8ToInt8 struct{}
+
+func (scanPlanBinaryInt8ToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt8 {
+ return fmt.Errorf("%d is less than minimum value for int8", n)
+ } else if n > math.MaxInt8 {
+ return fmt.Errorf("%d is greater than maximum value for int8", n)
+ }
+
+ *p = int8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint8 struct{}
+
+func (scanPlanBinaryInt8ToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint8", n)
+ }
+
+ if n > math.MaxUint8 {
+ return fmt.Errorf("%d is greater than maximum value for uint8", n)
+ }
+
+ *p = uint8(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt16 struct{}
+
+func (scanPlanBinaryInt8ToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt16 {
+ return fmt.Errorf("%d is less than minimum value for int16", n)
+ } else if n > math.MaxInt16 {
+ return fmt.Errorf("%d is greater than maximum value for int16", n)
+ }
+
+ *p = int16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint16 struct{}
+
+func (scanPlanBinaryInt8ToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint16", n)
+ }
+
+ if n > math.MaxUint16 {
+ return fmt.Errorf("%d is greater than maximum value for uint16", n)
+ }
+
+ *p = uint16(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt32 struct{}
+
+func (scanPlanBinaryInt8ToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt32 {
+ return fmt.Errorf("%d is less than minimum value for int32", n)
+ } else if n > math.MaxInt32 {
+ return fmt.Errorf("%d is greater than maximum value for int32", n)
+ }
+
+ *p = int32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint32 struct{}
+
+func (scanPlanBinaryInt8ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint32", n)
+ }
+
+ if n > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than maximum value for uint32", n)
+ }
+
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt64 struct{}
+
+func (scanPlanBinaryInt8ToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ *p = int64(binary.BigEndian.Uint64(src))
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint64 struct{}
+
+func (scanPlanBinaryInt8ToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint64", n)
+ }
+
+ *p = uint64(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToInt struct{}
+
+func (scanPlanBinaryInt8ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", n)
+ } else if n > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", n)
+ }
+
+ *p = int(n)
+
+ return nil
+}
+
+type scanPlanBinaryInt8ToUint struct{}
+
+func (scanPlanBinaryInt8ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for uint8: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+
+ if uint64(n) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", n)
+ }
+
+ *p = uint(n)
+
+ return nil
+}
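+
+// math.MinInt, math.MaxInt, and math.MaxUint in the two plans above are
+// platform dependent: on 64-bit targets a Postgres int8 value always fits, so
+// those bounds checks can only fail on 32-bit builds.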
+
+type scanPlanBinaryInt8ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt8ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+type scanPlanBinaryInt8ToTextScanner struct{}
+
+func (scanPlanBinaryInt8ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+
+type scanPlanTextAnyToInt8 struct{}
+
+func (scanPlanTextAnyToInt8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 8)
+ if err != nil {
+ return err
+ }
+
+ *p = int8(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint8 struct{}
+
+func (scanPlanTextAnyToUint8) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint8)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 8)
+ if err != nil {
+ return err
+ }
+
+ *p = uint8(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt16 struct{}
+
+func (scanPlanTextAnyToInt16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *p = int16(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint16 struct{}
+
+func (scanPlanTextAnyToUint16) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint16)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *p = uint16(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt32 struct{}
+
+func (scanPlanTextAnyToInt32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *p = int32(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint32 struct{}
+
+func (scanPlanTextAnyToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint32)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *p = uint32(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt64 struct{}
+
+func (scanPlanTextAnyToInt64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *p = int64(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint64 struct{}
+
+func (scanPlanTextAnyToUint64) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint64)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *p = uint64(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt struct{}
+
+func (scanPlanTextAnyToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ *p = int(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint struct{}
+
+func (scanPlanTextAnyToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 0)
+ if err != nil {
+ return err
+ }
+
+ *p = uint(n)
+ return nil
+}
+
+type scanPlanTextAnyToInt64Scanner struct{}
+
+func (scanPlanTextAnyToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ err = s.ScanInt64(Int8{Int64: n, Valid: true})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb
new file mode 100644
index 0000000..e0c8b7a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int.go.erb
@@ -0,0 +1,548 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Int64Scanner interface {
+ ScanInt64(Int8) error
+}
+
+type Int64Valuer interface {
+ Int64Value() (Int8, error)
+}
+
+
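+<%# This ERB loop stamps out the Int2, Int4, and Int8 types and their codecs
+    from a single template: pg_byte_size is the Postgres width in bytes and
+    pg_bit_size the matching Go integer width in bits. %>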
+<% [2, 4, 8].each do |pg_byte_size| %>
+<% pg_bit_size = pg_byte_size * 8 %>
+type Int<%= pg_byte_size %> struct {
+ Int<%= pg_bit_size %> int<%= pg_bit_size %>
+ Valid bool
+}
+
+// ScanInt64 implements the Int64Scanner interface.
+func (dst *Int<%= pg_byte_size %>) ScanInt64(n Int8) error {
+ if !n.Valid {
+ *dst = Int<%= pg_byte_size %>{}
+ return nil
+ }
+
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is less than minimum value for Int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n.Int64)
+ }
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: int<%= pg_bit_size %>(n.Int64), Valid: true}
+
+ return nil
+}
+
+func (n Int<%= pg_byte_size %>) Int64Value() (Int8, error) {
+ return Int8{Int64: int64(n.Int<%= pg_bit_size %>), Valid: n.Valid}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int<%= pg_byte_size %>) Scan(src any) error {
+ if src == nil {
+ *dst = Int<%= pg_byte_size %>{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ var err error
+ n, err = strconv.ParseInt(src, 10, <%= pg_bit_size %>)
+ if err != nil {
+ return err
+ }
+ case []byte:
+ var err error
+ n, err = strconv.ParseInt(string(src), 10, <%= pg_bit_size %>)
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < math.MinInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n)
+ }
+ if n > math.MaxInt<%= pg_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for Int<%= pg_byte_size %>", n)
+ }
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: int<%= pg_bit_size %>(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int<%= pg_byte_size %>) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Int<%= pg_bit_size %>), nil
+}
+
+func (src Int<%= pg_byte_size %>) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+ return []byte(strconv.FormatInt(int64(src.Int<%= pg_bit_size %>), 10)), nil
+}
+
+func (dst *Int<%= pg_byte_size %>) UnmarshalJSON(b []byte) error {
+ var n *int<%= pg_bit_size %>
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ if n == nil {
+ *dst = Int<%= pg_byte_size %>{}
+ } else {
+ *dst = Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: *n, Valid: true}
+ }
+
+ return nil
+}
+
+type Int<%= pg_byte_size %>Codec struct{}
+
+func (Int<%= pg_byte_size %>Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Int<%= pg_byte_size %>Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Int<%= pg_byte_size %>Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case int<%= pg_bit_size %>:
+ return encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %>{}
+ case Int64Valuer:
+ return encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case int<%= pg_bit_size %>:
+ return encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %>{}
+ case Int64Valuer:
+ return encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %> struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecBinaryInt<%= pg_bit_size %>) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int<%= pg_bit_size %>)
+ return pgio.AppendInt<%= pg_bit_size %>(buf, int<%= pg_bit_size %>(n)), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %> struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecTextInt<%= pg_bit_size %>) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n := value.(int<%= pg_bit_size %>)
+ return append(buf, strconv.FormatInt(int64(n), 10)...), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is greater than maximum value for int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is less than minimum value for int<%= pg_byte_size %>", n.Int64)
+ }
+
+ return pgio.AppendInt<%= pg_bit_size %>(buf, int<%= pg_bit_size %>(n.Int64)), nil
+}
+
+type encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer struct{}
+
+func (encodePlanInt<%= pg_byte_size %>CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.Int64 > math.MaxInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is greater than maximum value for int<%= pg_byte_size %>", n.Int64)
+ }
+ if n.Int64 < math.MinInt<%= pg_bit_size %> {
+ return nil, fmt.Errorf("%d is less than minimum value for int<%= pg_byte_size %>", n.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(n.Int64, 10)...), nil
+}
+
+func (Int<%= pg_byte_size %>Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt8{}
+ case *int16:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt16{}
+ case *int32:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt32{}
+ case *int64:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt64{}
+ case *int:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt{}
+ case *uint8:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint8{}
+ case *uint16:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint16{}
+ case *uint32:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint32{}
+ case *uint64:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint64{}
+ case *uint:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToUint{}
+ case Int64Scanner:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *int8:
+ return scanPlanTextAnyToInt8{}
+ case *int16:
+ return scanPlanTextAnyToInt16{}
+ case *int32:
+ return scanPlanTextAnyToInt32{}
+ case *int64:
+ return scanPlanTextAnyToInt64{}
+ case *int:
+ return scanPlanTextAnyToInt{}
+ case *uint8:
+ return scanPlanTextAnyToUint8{}
+ case *uint16:
+ return scanPlanTextAnyToUint16{}
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case *uint64:
+ return scanPlanTextAnyToUint64{}
+ case *uint:
+ return scanPlanTextAnyToUint{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Int<%= pg_byte_size %>Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int64
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+func (c Int<%= pg_byte_size %>Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n int<%= pg_bit_size %>
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+<%# PostgreSQL binary format integer to fixed size Go integers %>
+<% [8, 16, 32, 64].each do |dst_bit_size| %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt<%= dst_bit_size %> struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt<%= dst_bit_size %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*int<%= dst_bit_size %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ <% if dst_bit_size < pg_bit_size %>
+ n := int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < math.MinInt<%= dst_bit_size %> {
+ return fmt.Errorf("%d is less than minimum value for int<%= dst_bit_size %>", n)
+ } else if n > math.MaxInt<%= dst_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for int<%= dst_bit_size %>", n)
+ }
+
+ *p = int<%= dst_bit_size %>(n)
+ <% elsif dst_bit_size == pg_bit_size %>
+ *p = int<%= dst_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ <% else %>
+ *p = int<%= dst_bit_size %>(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ <% end %>
+
+ return nil
+}
+
+type scanPlanBinaryInt<%= pg_byte_size %>ToUint<%= dst_bit_size %> struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToUint<%= dst_bit_size %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for uint<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint<%= dst_bit_size %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint<%= dst_bit_size %>", n)
+ }
+ <% if dst_bit_size < pg_bit_size %>
+ if n > math.MaxUint<%= dst_bit_size %> {
+ return fmt.Errorf("%d is greater than maximum value for uint<%= dst_bit_size %>", n)
+ }
+ <% end %>
+ *p = uint<%= dst_bit_size %>(n)
+
+ return nil
+}
+<% end %>
+
+<%# PostgreSQL binary format integer to Go machine integers %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*int)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ <% if 32 < pg_bit_size %>
+ n := int64(binary.BigEndian.Uint<%= pg_bit_size %>(src))
+ if n < math.MinInt {
+ return fmt.Errorf("%d is less than minimum value for int", n)
+ } else if n > math.MaxInt {
+ return fmt.Errorf("%d is greater than maximum value for int", n)
+ }
+
+ *p = int(n)
+ <% else %>
+ *p = int(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ <% end %>
+
+ return nil
+}
+
+type scanPlanBinaryInt<%= pg_byte_size %>ToUint struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToUint) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for uint<%= pg_byte_size %>: %v", len(src))
+ }
+
+ p, ok := (dst).(*uint)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+ if n < 0 {
+ return fmt.Errorf("%d is less than minimum value for uint", n)
+ }
+ <% if 32 < pg_bit_size %>
+ if uint64(n) > math.MaxUint {
+ return fmt.Errorf("%d is greater than maximum value for uint", n)
+ }
+ <% end %>
+ *p = uint(n)
+
+ return nil
+}
+
+<%# PostgreSQL binary format integer to Go Int64Scanner %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+
+ return s.ScanInt64(Int8{Int64: n, Valid: true})
+}
+
+<%# PostgreSQL binary format integer to Go TextScanner %>
+type scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner struct{}
+
+func (scanPlanBinaryInt<%= pg_byte_size %>ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != <%= pg_byte_size %> {
+ return fmt.Errorf("invalid length for int<%= pg_byte_size %>: %v", len(src))
+ }
+
+ n := int64(int<%= pg_bit_size %>(binary.BigEndian.Uint<%= pg_bit_size %>(src)))
+
+ return s.ScanText(Text{String: strconv.FormatInt(n, 10), Valid: true})
+}
+<% end %>
+
+<%# Any text to all integer types %>
+<% [
+ ["8", 8],
+ ["16", 16],
+ ["32", 32],
+ ["64", 64],
+ ["", 0]
+].each do |type_suffix, bit_size| %>
+type scanPlanTextAnyToInt<%= type_suffix %> struct{}
+
+func (scanPlanTextAnyToInt<%= type_suffix %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*int<%= type_suffix %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, <%= bit_size %>)
+ if err != nil {
+ return err
+ }
+
+ *p = int<%= type_suffix %>(n)
+ return nil
+}
+
+type scanPlanTextAnyToUint<%= type_suffix %> struct{}
+
+func (scanPlanTextAnyToUint<%= type_suffix %>) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p, ok := (dst).(*uint<%= type_suffix %>)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, <%= bit_size %>)
+ if err != nil {
+ return err
+ }
+
+ *p = uint<%= type_suffix %>(n)
+ return nil
+}
+<% end %>
+
+type scanPlanTextAnyToInt64Scanner struct{}
+
+func (scanPlanTextAnyToInt64Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Int64Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanInt64(Int8{})
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ err = s.ScanInt64(Int8{Int64: n, Valid: true})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
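
For orientation, a sketch of what this template expands to for pg_byte_size = 2 (pg_bit_size = 16); the checked-in int.go earlier in this diff carries the full generated output for Int2, Int4, and Int8, only ScanInt64 is shown here.

// Expansion sketch; the generated code lives in package pgtype.
package pgtype

import (
	"fmt"
	"math"
)

type Int2 struct {
	Int16 int16
	Valid bool
}

// ScanInt64 implements the Int64Scanner interface.
func (dst *Int2) ScanInt64(n Int8) error {
	if !n.Valid {
		*dst = Int2{}
		return nil
	}
	if n.Int64 < math.MinInt16 {
		return fmt.Errorf("%d is less than minimum value for Int2", n.Int64)
	}
	if n.Int64 > math.MaxInt16 {
		return fmt.Errorf("%d is greater than maximum value for Int2", n.Int64)
	}
	*dst = Int2{Int16: int16(n.Int64), Valid: true}
	return nil
}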
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb
new file mode 100644
index 0000000..ac9a3f1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/int_test.go.erb
@@ -0,0 +1,93 @@
+package pgtype_test
+
+import (
+ "math"
+ "testing"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+<% [2, 4, 8].each do |pg_byte_size| %>
+<% pg_bit_size = pg_byte_size * 8 %>
+func TestInt<%= pg_byte_size %>Codec(t *testing.T) {
+ pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int<%= pg_byte_size %>", []pgxtest.ValueRoundTripTest{
+ {int8(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int16(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int32(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int64(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint8(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint16(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint32(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint64(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {uint(1), new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {int32(-1), new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: -1, Valid: true})},
+ {1, new(int8), isExpectedEq(int8(1))},
+ {1, new(int16), isExpectedEq(int16(1))},
+ {1, new(int32), isExpectedEq(int32(1))},
+ {1, new(int64), isExpectedEq(int64(1))},
+ {1, new(uint8), isExpectedEq(uint8(1))},
+ {1, new(uint16), isExpectedEq(uint16(1))},
+ {1, new(uint32), isExpectedEq(uint32(1))},
+ {1, new(uint64), isExpectedEq(uint64(1))},
+ {1, new(int), isExpectedEq(int(1))},
+ {1, new(uint), isExpectedEq(uint(1))},
+ {-1, new(int8), isExpectedEq(int8(-1))},
+ {-1, new(int16), isExpectedEq(int16(-1))},
+ {-1, new(int32), isExpectedEq(int32(-1))},
+ {-1, new(int64), isExpectedEq(int64(-1))},
+ {-1, new(int), isExpectedEq(int(-1))},
+ {math.MinInt<%= pg_bit_size %>, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(math.MinInt<%= pg_bit_size %>))},
+ {-1, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(-1))},
+ {0, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(0))},
+ {1, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(1))},
+ {math.MaxInt<%= pg_bit_size %>, new(int<%= pg_bit_size %>), isExpectedEq(int<%= pg_bit_size %>(math.MaxInt<%= pg_bit_size %>))},
+ {1, new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true})},
+ {"1", new(string), isExpectedEq("1")},
+ {pgtype.Int<%= pg_byte_size %>{}, new(pgtype.Int<%= pg_byte_size %>), isExpectedEq(pgtype.Int<%= pg_byte_size %>{})},
+ {nil, new(*int<%= pg_bit_size %>), isExpectedEq((*int<%= pg_bit_size %>)(nil))},
+ })
+}
+
+func TestInt<%= pg_byte_size %>MarshalJSON(t *testing.T) {
+ successfulTests := []struct {
+ source pgtype.Int<%= pg_byte_size %>
+ result string
+ }{
+ {source: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 0}, result: "null"},
+ {source: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}, result: "1"},
+ }
+ for i, tt := range successfulTests {
+ r, err := tt.source.MarshalJSON()
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ }
+
+ if string(r) != tt.result {
+ t.Errorf("%d: expected %v to convert to %v, but it was %v", i, tt.source, tt.result, string(r))
+ }
+ }
+}
+
+func TestInt<%= pg_byte_size %>UnmarshalJSON(t *testing.T) {
+ successfulTests := []struct {
+ source string
+ result pgtype.Int<%= pg_byte_size %>
+ }{
+ {source: "null", result: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 0}},
+ {source: "1", result: pgtype.Int<%= pg_byte_size %>{Int<%= pg_bit_size %>: 1, Valid: true}},
+ }
+ for i, tt := range successfulTests {
+ var r pgtype.Int<%= pg_byte_size %>
+ err := r.UnmarshalJSON([]byte(tt.source))
+ if err != nil {
+ t.Errorf("%d: %v", i, err)
+ }
+
+ if r != tt.result {
+ t.Errorf("%d: expected %v to convert to %v, but it was %v", i, tt.source, tt.result, r)
+ }
+ }
+}
+<% end %>
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb
new file mode 100644
index 0000000..0175700
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test.go.erb
@@ -0,0 +1,62 @@
+package pgtype_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/jackc/pgx/v5/pgtype/testutil"
+ "github.com/jackc/pgx/v5"
+)
+
+<%
+ [
+ ["int4", ["int16", "int32", "int64", "uint64", "pgtype.Int4"], [[1, 1], [1, 10], [10, 1], [100, 10]]],
+ ["numeric", ["int64", "float64", "pgtype.Numeric"], [[1, 1], [1, 10], [10, 1], [100, 10]]],
+ ].each do |pg_type, go_types, rows_columns|
+%>
+<% go_types.each do |go_type| %>
+<% rows_columns.each do |rows, columns| %>
+<% [["Text", "pgx.TextFormatCode"], ["Binary", "pgx.BinaryFormatCode"]].each do |format_name, format_code| %>
+func BenchmarkQuery<%= format_name %>FormatDecode_PG_<%= pg_type %>_to_Go_<%= go_type.gsub(/\W/, "_") %>_<%= rows %>_rows_<%= columns %>_columns(b *testing.B) {
+ defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
+ b.ResetTimer()
+ var v [<%= columns %>]<%= go_type %>
+ for i := 0; i < b.N; i++ {
+ rows, _ := conn.Query(
+ ctx,
+ `select <% columns.times do |col_idx| %><% if col_idx != 0 %>, <% end %>n::<%= pg_type %> + <%= col_idx%><% end %> from generate_series(1, <%= rows %>) n`,
+ []any{pgx.QueryResultFormats{<%= format_code %>}},
+ )
+ _, err := pgx.ForEachRow(rows, []any{<% columns.times do |col_idx| %><% if col_idx != 0 %>, <% end %>&v[<%= col_idx%>]<% end %>}, func() error { return nil })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+<% end %>
+<% end %>
+<% end %>
+<% end %>
+
+<% [10, 100, 1000].each do |array_size| %>
+<% [["Text", "pgx.TextFormatCode"], ["Binary", "pgx.BinaryFormatCode"]].each do |format_name, format_code| %>
+func BenchmarkQuery<%= format_name %>FormatDecode_PG_Int4Array_With_Go_Int4Array_<%= array_size %>(b *testing.B) {
+ defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
+ b.ResetTimer()
+ var v []int32
+ for i := 0; i < b.N; i++ {
+ rows, _ := conn.Query(
+ ctx,
+ `select array_agg(n) from generate_series(1, <%= array_size %>) n`,
+ []any{pgx.QueryResultFormats{<%= format_code %>}},
+ )
+ _, err := pgx.ForEachRow(rows, []any{&v}, func() error { return nil })
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
+<% end %>
+<% end %>
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh
new file mode 100644
index 0000000..22ac01a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/integration_benchmark_test_gen.sh
@@ -0,0 +1,2 @@
+erb integration_benchmark_test.go.erb > integration_benchmark_test.go
+goimports -w integration_benchmark_test.go
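
The other .go.erb templates in this directory presumably regenerate the same way as this benchmark script, e.g. erb int.go.erb > int.go followed by goimports -w int.go; only the benchmark generator is checked in as a shell script here.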
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
new file mode 100644
index 0000000..06703d4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const (
+ microsecondsPerSecond = 1000000
+ microsecondsPerMinute = 60 * microsecondsPerSecond
+ microsecondsPerHour = 60 * microsecondsPerMinute
+ microsecondsPerDay = 24 * microsecondsPerHour
+ microsecondsPerMonth = 30 * microsecondsPerDay
+)
+
+type IntervalScanner interface {
+ ScanInterval(v Interval) error
+}
+
+type IntervalValuer interface {
+ IntervalValue() (Interval, error)
+}
+
+type Interval struct {
+ Microseconds int64
+ Days int32
+ Months int32
+ Valid bool
+}
+
+func (interval *Interval) ScanInterval(v Interval) error {
+ *interval = v
+ return nil
+}
+
+func (interval Interval) IntervalValue() (Interval, error) {
+ return interval, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (interval *Interval) Scan(src any) error {
+ if src == nil {
+ *interval = Interval{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToIntervalScanner{}.Scan([]byte(src), interval)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (interval Interval) Value() (driver.Value, error) {
+ if !interval.Valid {
+ return nil, nil
+ }
+
+ buf, err := IntervalCodec{}.PlanEncode(nil, 0, TextFormatCode, interval).Encode(interval, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type IntervalCodec struct{}
+
+func (IntervalCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (IntervalCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (IntervalCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(IntervalValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanIntervalCodecBinary{}
+ case TextFormatCode:
+ return encodePlanIntervalCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanIntervalCodecBinary struct{}
+
+func (encodePlanIntervalCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ interval, err := value.(IntervalValuer).IntervalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !interval.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt64(buf, interval.Microseconds)
+ buf = pgio.AppendInt32(buf, interval.Days)
+ buf = pgio.AppendInt32(buf, interval.Months)
+ return buf, nil
+}
+
+type encodePlanIntervalCodecText struct{}
+
+func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ interval, err := value.(IntervalValuer).IntervalValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !interval.Valid {
+ return nil, nil
+ }
+
+	if interval.Months != 0 {
+		buf = append(buf, strconv.FormatInt(int64(interval.Months), 10)...)
+		buf = append(buf, " mon"...)
+	}
+
+	if interval.Days != 0 {
+		if interval.Months != 0 {
+			buf = append(buf, ' ')
+		}
+		buf = append(buf, strconv.FormatInt(int64(interval.Days), 10)...)
+		buf = append(buf, " day"...)
+	}
+
+	if interval.Microseconds != 0 {
+		// Emit a separating space only when a month or day part precedes the
+		// time part; an unconditional leading space produced leading or double
+		// spaces that the text scan plan below cannot parse back.
+		if interval.Months != 0 || interval.Days != 0 {
+			buf = append(buf, ' ')
+		}
+
+ absMicroseconds := interval.Microseconds
+ if absMicroseconds < 0 {
+ absMicroseconds = -absMicroseconds
+ buf = append(buf, '-')
+ }
+
+ hours := absMicroseconds / microsecondsPerHour
+ minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
+ seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
+
+ timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ buf = append(buf, timeStr...)
+
+ microseconds := absMicroseconds % microsecondsPerSecond
+ if microseconds != 0 {
+ buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
+ }
+ }
+
+ return buf, nil
+}
+
+func (IntervalCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case IntervalScanner:
+ return scanPlanBinaryIntervalToIntervalScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case IntervalScanner:
+ return scanPlanTextAnyToIntervalScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryIntervalToIntervalScanner struct{}
+
+func (scanPlanBinaryIntervalToIntervalScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(IntervalScanner)
+
+ if src == nil {
+ return scanner.ScanInterval(Interval{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("Received an invalid size for an interval: %d", len(src))
+ }
+
+ microseconds := int64(binary.BigEndian.Uint64(src))
+ days := int32(binary.BigEndian.Uint32(src[8:]))
+ months := int32(binary.BigEndian.Uint32(src[12:]))
+
+ return scanner.ScanInterval(Interval{Microseconds: microseconds, Days: days, Months: months, Valid: true})
+}
+
+type scanPlanTextAnyToIntervalScanner struct{}
+
+func (scanPlanTextAnyToIntervalScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(IntervalScanner)
+
+ if src == nil {
+ return scanner.ScanInterval(Interval{})
+ }
+
+ var microseconds int64
+ var days int32
+ var months int32
+
+ parts := strings.Split(string(src), " ")
+
+ for i := 0; i < len(parts)-1; i += 2 {
+ scalar, err := strconv.ParseInt(parts[i], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval format")
+ }
+
+ switch parts[i+1] {
+ case "year", "years":
+ months += int32(scalar * 12)
+ case "mon", "mons":
+ months += int32(scalar)
+ case "day", "days":
+ days = int32(scalar)
+ }
+ }
+
+ if len(parts)%2 == 1 {
+ timeParts := strings.SplitN(parts[len(parts)-1], ":", 3)
+ if len(timeParts) != 3 {
+ return fmt.Errorf("bad interval format")
+ }
+
+ var negative bool
+ if timeParts[0][0] == '-' {
+ negative = true
+ timeParts[0] = timeParts[0][1:]
+ }
+
+ hours, err := strconv.ParseInt(timeParts[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval hour format: %s", timeParts[0])
+ }
+
+ minutes, err := strconv.ParseInt(timeParts[1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval minute format: %s", timeParts[1])
+ }
+
+ sec, secFrac, secFracFound := strings.Cut(timeParts[2], ".")
+
+ seconds, err := strconv.ParseInt(sec, 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval second format: %s", sec)
+ }
+
+ var uSeconds int64
+ if secFracFound {
+ uSeconds, err = strconv.ParseInt(secFrac, 10, 64)
+ if err != nil {
+ return fmt.Errorf("bad interval decimal format: %s", secFrac)
+ }
+
+ for i := 0; i < 6-len(secFrac); i++ {
+ uSeconds *= 10
+ }
+ }
+
+ microseconds = hours * microsecondsPerHour
+ microseconds += minutes * microsecondsPerMinute
+ microseconds += seconds * microsecondsPerSecond
+ microseconds += uSeconds
+
+ if negative {
+ microseconds = -microseconds
+ }
+ }
+
+ return scanner.ScanInterval(Interval{Months: months, Days: days, Microseconds: microseconds, Valid: true})
+}
+
+func (c IntervalCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c IntervalCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var interval Interval
+ err := codecScan(c, m, oid, format, src, &interval)
+ if err != nil {
+ return nil, err
+ }
+ return interval, nil
+}
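
A round-trip sketch for the interval codec (values illustrative): Value() emits the text form produced by encodePlanIntervalCodecText, and Scan() parses the same representation back.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// 1 month, 2 days, 03:04:05.
	iv := pgtype.Interval{
		Months:       1,
		Days:         2,
		Microseconds: 3*3600000000 + 4*60000000 + 5*1000000,
		Valid:        true,
	}

	v, err := iv.Value() // text form, e.g. "1 mon 2 day 03:04:05"
	if err != nil {
		panic(err)
	}
	fmt.Println(v)

	var parsed pgtype.Interval // Scan accepts the same text format
	if err := parsed.Scan(v.(string)); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", parsed) // {Microseconds:11045000000 Days:2 Months:1 Valid:true}
}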
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/json.go b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
new file mode 100644
index 0000000..e71dcb9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
@@ -0,0 +1,230 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+type JSONCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*JSONCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*JSONCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch value.(type) {
+ case string:
+ return encodePlanJSONCodecEitherFormatString{}
+ case []byte:
+ return encodePlanJSONCodecEitherFormatByteSlice{}
+
+ // Handle json.RawMessage specifically because if it is run through json.Marshal it may be mutated.
+ // e.g. `{"foo": "bar"}` -> `{"foo":"bar"}`.
+ case json.RawMessage:
+ return encodePlanJSONCodecEitherFormatJSONRawMessage{}
+
+ // Cannot rely on driver.Valuer being handled later because anything can be marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1430
+ //
+	// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to be used
+	// when both are implemented. https://github.com/jackc/pgx/issues/1805
+ case driver.Valuer:
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+
+ // Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
+ // marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1681
+ case json.Marshaler:
+ return &encodePlanJSONCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+ }
+
+	// Because anything can be marshalled, the normal wrapping in Map.PlanEncode doesn't get a chance to run. So try
+	// the appropriate wrappers here.
+ for _, f := range []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ } {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ return &encodePlanJSONCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+}
+
+type encodePlanJSONCodecEitherFormatString struct{}
+
+func (encodePlanJSONCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonString := value.(string)
+ buf = append(buf, jsonString...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatByteSlice struct{}
+
+func (encodePlanJSONCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes := value.([]byte)
+ if jsonBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatJSONRawMessage struct{}
+
+func (encodePlanJSONCodecEitherFormatJSONRawMessage) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes := value.(json.RawMessage)
+ if jsonBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+type encodePlanJSONCodecEitherFormatMarshal struct {
+ marshal func(v any) ([]byte, error)
+}
+
+func (e *encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ jsonBytes, err := e.marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ buf = append(buf, jsonBytes...)
+ return buf, nil
+}
+
+func (c *JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch target.(type) {
+ case *string:
+ return scanPlanAnyToString{}
+
+ case **string:
+ // This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
+ // solution would be.
+ //
+ // https://github.com/jackc/pgx/issues/1470 -- **string
+ // https://github.com/jackc/pgx/issues/1691 -- ** anything else
+
+ if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
+ if nextPlan := m.planScan(oid, format, nextDst); nextPlan != nil {
+ if _, failed := nextPlan.(*scanPlanFail); !failed {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ case *[]byte:
+ return scanPlanJSONToByteSlice{}
+ case BytesScanner:
+ return scanPlanBinaryBytesToBytesScanner{}
+
+ // Cannot rely on sql.Scanner being handled later because scanPlanJSONToJSONUnmarshal will take precedence.
+ //
+ // https://github.com/jackc/pgx/issues/1418
+ case sql.Scanner:
+ return &scanPlanSQLScanner{formatCode: format}
+ }
+
+ return &scanPlanJSONToJSONUnmarshal{
+ unmarshal: c.Unmarshal,
+ }
+}
+
+type scanPlanAnyToString struct{}
+
+func (scanPlanAnyToString) Scan(src []byte, dst any) error {
+ p := dst.(*string)
+ *p = string(src)
+ return nil
+}
+
+type scanPlanJSONToByteSlice struct{}
+
+func (scanPlanJSONToByteSlice) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanJSONToBytesScanner struct{}
+
+func (scanPlanJSONToBytesScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(BytesScanner)
+ return scanner.ScanBytes(src)
+}
+
+type scanPlanJSONToJSONUnmarshal struct {
+ unmarshal func(data []byte, v any) error
+}
+
+func (s *scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
+ if src == nil {
+ dstValue := reflect.ValueOf(dst)
+ if dstValue.Kind() == reflect.Ptr {
+ el := dstValue.Elem()
+ switch el.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface:
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ elem := reflect.ValueOf(dst).Elem()
+ elem.Set(reflect.Zero(elem.Type()))
+
+ return s.unmarshal(src, dst)
+}
+
+func (c *JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+}
+
+func (c *JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
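
Because JSONCodec exposes its Marshal and Unmarshal functions as struct fields, an alternative JSON implementation can be swapped in by re-registering the json type on a pgtype.Map. A sketch using the standard library to disable HTML escaping; JSONCodec's fields and pgtype.JSONOID are from this package, the rest is illustrative.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

// marshalNoHTMLEscape keeps <, > and & literal instead of \u003c etc.
func marshalNoHTMLEscape(v any) ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(v); err != nil {
		return nil, err
	}
	return bytes.TrimRight(buf.Bytes(), "\n"), nil // Encode appends a newline
}

func main() {
	m := pgtype.NewMap()
	m.RegisterType(&pgtype.Type{
		Name:  "json",
		OID:   pgtype.JSONOID,
		Codec: &pgtype.JSONCodec{Marshal: marshalNoHTMLEscape, Unmarshal: json.Unmarshal},
	})

	b, _ := marshalNoHTMLEscape(map[string]string{"tag": "<b>"})
	fmt.Println(string(b)) // {"tag":"<b>"}
}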
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go
new file mode 100644
index 0000000..4d4eb58
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/jsonb.go
@@ -0,0 +1,129 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+type JSONBCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*JSONBCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*JSONBCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, TextFormatCode, value)
+ if plan != nil {
+ return &encodePlanJSONBCodecBinaryWrapper{textPlan: plan}
+ }
+ case TextFormatCode:
+ return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, format, value)
+ }
+
+ return nil
+}
+
+type encodePlanJSONBCodecBinaryWrapper struct {
+ textPlan EncodePlan
+}
+
+func (plan *encodePlanJSONBCodecBinaryWrapper) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ buf = append(buf, 1)
+ return plan.textPlan.Encode(value, buf)
+}
+
+func (c *JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, TextFormatCode, target)
+ if plan != nil {
+ return &scanPlanJSONBCodecBinaryUnwrapper{textPlan: plan}
+ }
+ case TextFormatCode:
+ return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, format, target)
+ }
+
+ return nil
+}
+
+type scanPlanJSONBCodecBinaryUnwrapper struct {
+ textPlan ScanPlan
+}
+
+func (plan *scanPlanJSONBCodecBinaryUnwrapper) Scan(src []byte, dst any) error {
+ if src == nil {
+ return plan.textPlan.Scan(src, dst)
+ }
+
+ if len(src) == 0 {
+ return fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ return plan.textPlan.Scan(src[1:], dst)
+}
+
+func (c *JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ if len(src) == 0 {
+ return nil, fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return nil, fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ dstBuf := make([]byte, len(src)-1)
+ copy(dstBuf, src[1:])
+ return dstBuf, nil
+ case TextFormatCode:
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code: %v", format)
+ }
+}
+
+func (c *JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ if len(src) == 0 {
+ return nil, fmt.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return nil, fmt.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ src = src[1:]
+ case TextFormatCode:
+ default:
+ return nil, fmt.Errorf("unknown format code: %v", format)
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
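
The binary jsonb wire format handled above is simply a one-byte version header (currently 1) followed by the JSON text, which is why the wrapper and unwrapper types only add or strip a single leading byte. A minimal sketch:

package main

import "fmt"

func main() {
	// What the server sends for a binary-format jsonb value.
	wire := append([]byte{1}, `{"a":1}`...)

	if len(wire) == 0 {
		panic("jsonb too short")
	}
	if wire[0] != 1 {
		panic(fmt.Sprintf("unknown jsonb version number %d", wire[0]))
	}
	fmt.Println(string(wire[1:])) // {"a":1}
}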
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/line.go b/vendor/github.com/jackc/pgx/v5/pgtype/line.go
new file mode 100644
index 0000000..4ae8003
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/line.go
@@ -0,0 +1,225 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type LineScanner interface {
+ ScanLine(v Line) error
+}
+
+type LineValuer interface {
+ LineValue() (Line, error)
+}
+
+type Line struct {
+ A, B, C float64
+ Valid bool
+}
+
+func (line *Line) ScanLine(v Line) error {
+ *line = v
+ return nil
+}
+
+func (line Line) LineValue() (Line, error) {
+ return line, nil
+}
+
+func (line *Line) Set(src any) error {
+ return fmt.Errorf("cannot convert %v to Line", src)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (line *Line) Scan(src any) error {
+ if src == nil {
+ *line = Line{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToLineScanner{}.Scan([]byte(src), line)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (line Line) Value() (driver.Value, error) {
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf, err := LineCodec{}.PlanEncode(nil, 0, TextFormatCode, line).Encode(line, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type LineCodec struct{}
+
+func (LineCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (LineCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (LineCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(LineValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanLineCodecBinary{}
+ case TextFormatCode:
+ return encodePlanLineCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanLineCodecBinary struct{}
+
+func (encodePlanLineCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ line, err := value.(LineValuer).LineValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.A))
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.B))
+ buf = pgio.AppendUint64(buf, math.Float64bits(line.C))
+ return buf, nil
+}
+
+type encodePlanLineCodecText struct{}
+
+func (encodePlanLineCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ line, err := value.(LineValuer).LineValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !line.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`{%s,%s,%s}`,
+ strconv.FormatFloat(line.A, 'f', -1, 64),
+ strconv.FormatFloat(line.B, 'f', -1, 64),
+ strconv.FormatFloat(line.C, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (LineCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case LineScanner:
+ return scanPlanBinaryLineToLineScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case LineScanner:
+ return scanPlanTextAnyToLineScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLineToLineScanner struct{}
+
+func (scanPlanBinaryLineToLineScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LineScanner)
+
+ if src == nil {
+ return scanner.ScanLine(Line{})
+ }
+
+ if len(src) != 24 {
+ return fmt.Errorf("invalid length for line: %v", len(src))
+ }
+
+ a := binary.BigEndian.Uint64(src)
+ b := binary.BigEndian.Uint64(src[8:])
+ c := binary.BigEndian.Uint64(src[16:])
+
+ return scanner.ScanLine(Line{
+ A: math.Float64frombits(a),
+ B: math.Float64frombits(b),
+ C: math.Float64frombits(c),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToLineScanner struct{}
+
+func (scanPlanTextAnyToLineScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LineScanner)
+
+ if src == nil {
+ return scanner.ScanLine(Line{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for line: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 3)
+ if len(parts) < 3 {
+ return fmt.Errorf("invalid format for line")
+ }
+
+ a, err := strconv.ParseFloat(parts[0], 64)
+ if err != nil {
+ return err
+ }
+
+ b, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return err
+ }
+
+ c, err := strconv.ParseFloat(parts[2], 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanLine(Line{A: a, B: b, C: c, Valid: true})
+}
+
+func (c LineCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c LineCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var line Line
+ err := codecScan(c, m, oid, format, src, &line)
+ if err != nil {
+ return nil, err
+ }
+ return line, nil
+}
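
Usage sketch for the line codec: Line implements database/sql's Scanner and Valuer for the {A,B,C} text form, so it round-trips without a connection (values illustrative).

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var l pgtype.Line
	if err := l.Scan("{1,-2,3.5}"); err != nil { // text format: {A,B,C}
		panic(err)
	}
	fmt.Println(l.A, l.B, l.C, l.Valid) // 1 -2 3.5 true

	v, err := l.Value() // re-encodes through encodePlanLineCodecText
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // {1,-2,3.5}
}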
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go b/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go
new file mode 100644
index 0000000..05a86e1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/lseg.go
@@ -0,0 +1,238 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type LsegScanner interface {
+ ScanLseg(v Lseg) error
+}
+
+type LsegValuer interface {
+ LsegValue() (Lseg, error)
+}
+
+type Lseg struct {
+ P [2]Vec2
+ Valid bool
+}
+
+func (lseg *Lseg) ScanLseg(v Lseg) error {
+ *lseg = v
+ return nil
+}
+
+func (lseg Lseg) LsegValue() (Lseg, error) {
+ return lseg, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (lseg *Lseg) Scan(src any) error {
+ if src == nil {
+ *lseg = Lseg{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToLsegScanner{}.Scan([]byte(src), lseg)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (lseg Lseg) Value() (driver.Value, error) {
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf, err := LsegCodec{}.PlanEncode(nil, 0, TextFormatCode, lseg).Encode(lseg, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type LsegCodec struct{}
+
+func (LsegCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (LsegCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (LsegCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(LsegValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanLsegCodecBinary{}
+ case TextFormatCode:
+ return encodePlanLsegCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanLsegCodecBinary struct{}
+
+func (encodePlanLsegCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ lseg, err := value.(LsegValuer).LsegValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(lseg.P[1].Y))
+ return buf, nil
+}
+
+type encodePlanLsegCodecText struct{}
+
+func (encodePlanLsegCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ lseg, err := value.(LsegValuer).LsegValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !lseg.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`[(%s,%s),(%s,%s)]`,
+ strconv.FormatFloat(lseg.P[0].X, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[0].Y, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[1].X, 'f', -1, 64),
+ strconv.FormatFloat(lseg.P[1].Y, 'f', -1, 64),
+ )...)
+ return buf, nil
+}
+
+func (LsegCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case LsegScanner:
+ return scanPlanBinaryLsegToLsegScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case LsegScanner:
+ return scanPlanTextAnyToLsegScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLsegToLsegScanner struct{}
+
+func (scanPlanBinaryLsegToLsegScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LsegScanner)
+
+ if src == nil {
+ return scanner.ScanLseg(Lseg{})
+ }
+
+ if len(src) != 32 {
+ return fmt.Errorf("invalid length for lseg: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ return scanner.ScanLseg(Lseg{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToLsegScanner struct{}
+
+func (scanPlanTextAnyToLsegScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(LsegScanner)
+
+ if src == nil {
+ return scanner.ScanLseg(Lseg{})
+ }
+
+ if len(src) < 11 {
+ return fmt.Errorf("invalid length for lseg: %v", len(src))
+ }
+
+ str := string(src[2:])
+
+	end := strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-2]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanLseg(Lseg{P: [2]Vec2{{x1, y1}, {x2, y2}}, Valid: true})
+}
+
+func (c LsegCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c LsegCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var lseg Lseg
+ err := codecScan(c, m, oid, format, src, &lseg)
+ if err != nil {
+ return nil, err
+ }
+ return lseg, nil
+}
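
The same pattern applies to lseg, whose text form is [(x1,y1),(x2,y2)] as parsed by scanPlanTextAnyToLsegScanner above (values illustrative).

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var seg pgtype.Lseg
	if err := seg.Scan("[(1,2),(3,4)]"); err != nil {
		panic(err)
	}
	fmt.Println(seg.P, seg.Valid) // [{1 2} {3 4}] true
}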
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
new file mode 100644
index 0000000..6af3177
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
@@ -0,0 +1,122 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+type LtreeCodec struct{}
+
+func (l LtreeCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+// PreferredFormat returns the preferred format.
+func (l LtreeCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+// PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+// found then nil is returned.
+func (l LtreeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode:
+ return (TextCodec)(l).PlanEncode(m, oid, format, value)
+ case BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodeLtreeCodecBinaryString{}
+ case []byte:
+ return encodeLtreeCodecBinaryByteSlice{}
+ case TextValuer:
+ return encodeLtreeCodecBinaryTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodeLtreeCodecBinaryString struct{}
+
+func (encodeLtreeCodecBinaryString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ltree := value.(string)
+ buf = append(buf, 1)
+ return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryByteSlice struct{}
+
+func (encodeLtreeCodecBinaryByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ltree := value.([]byte)
+ buf = append(buf, 1)
+ return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryTextValuer struct{}
+
+func (encodeLtreeCodecBinaryTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, 1)
+ return append(buf, t.String...), nil
+}
+
+// PlanScan returns a ScanPlan for scanning a PostgreSQL value into a destination with the same type as target. If
+// no plan can be found then nil is returned.
+func (l LtreeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode:
+ return (TextCodec)(l).PlanScan(m, oid, format, target)
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanBinaryLtreeToString{}
+ case TextScanner:
+ return scanPlanBinaryLtreeToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryLtreeToString struct{}
+
+func (scanPlanBinaryLtreeToString) Scan(src []byte, target any) error {
+ version := src[0]
+ if version != 1 {
+ return fmt.Errorf("unsupported ltree version %d", version)
+ }
+
+ p := (target).(*string)
+ *p = string(src[1:])
+
+ return nil
+}
+
+type scanPlanBinaryLtreeToTextScanner struct{}
+
+func (scanPlanBinaryLtreeToTextScanner) Scan(src []byte, target any) error {
+ version := src[0]
+ if version != 1 {
+ return fmt.Errorf("unsupported ltree version %d", version)
+ }
+
+ scanner := (target).(TextScanner)
+ return scanner.ScanText(Text{String: string(src[1:]), Valid: true})
+}
+
+// DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
+func (l LtreeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return (TextCodec)(l).DecodeDatabaseSQLValue(m, oid, format, src)
+}
+
+// DecodeValue returns src decoded into its default format.
+func (l LtreeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ return (TextCodec)(l).DecodeValue(m, oid, format, src)
+}
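
ltree is an extension type, so its OID is assigned per database rather than fixed, and the codec must be registered with that OID before use. A registration sketch; the pg_type lookup and DATABASE_URL are illustrative, while Connect, TypeMap, and RegisterType are existing pgx API.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// Look up the database-assigned OID for ltree, then register the codec.
	var oid uint32
	if err := conn.QueryRow(ctx, `select oid from pg_type where typname = 'ltree'`).Scan(&oid); err != nil {
		panic(err)
	}
	conn.TypeMap().RegisterType(&pgtype.Type{Name: "ltree", OID: oid, Codec: pgtype.LtreeCodec{}})

	var path string
	if err := conn.QueryRow(ctx, `select 'a.b.c'::ltree`).Scan(&path); err != nil {
		panic(err)
	}
	fmt.Println(path) // a.b.c
}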
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go b/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go
new file mode 100644
index 0000000..e913ec9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/macaddr.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+)
+
+type MacaddrCodec struct{}
+
+func (MacaddrCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (MacaddrCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (MacaddrCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case net.HardwareAddr:
+ return encodePlanMacaddrCodecBinaryHardwareAddr{}
+ case TextValuer:
+ return encodePlanMacAddrCodecTextValuer{}
+
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case net.HardwareAddr:
+ return encodePlanMacaddrCodecTextHardwareAddr{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanMacaddrCodecBinaryHardwareAddr struct{}
+
+func (encodePlanMacaddrCodecBinaryHardwareAddr) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ addr := value.(net.HardwareAddr)
+ if addr == nil {
+ return nil, nil
+ }
+
+ return append(buf, addr...), nil
+}
+
+type encodePlanMacAddrCodecTextValuer struct{}
+
+func (encodePlanMacAddrCodecTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ addr, err := net.ParseMAC(t.String)
+ if err != nil {
+ return nil, err
+ }
+
+ return append(buf, addr...), nil
+}
+
+type encodePlanMacaddrCodecTextHardwareAddr struct{}
+
+func (encodePlanMacaddrCodecTextHardwareAddr) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ addr := value.(net.HardwareAddr)
+ if addr == nil {
+ return nil, nil
+ }
+
+ return append(buf, addr.String()...), nil
+}
+
+func (MacaddrCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *net.HardwareAddr:
+ return scanPlanBinaryMacaddrToHardwareAddr{}
+ case TextScanner:
+ return scanPlanBinaryMacaddrToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *net.HardwareAddr:
+ return scanPlanTextMacaddrToHardwareAddr{}
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryMacaddrToHardwareAddr struct{}
+
+func (scanPlanBinaryMacaddrToHardwareAddr) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*net.HardwareAddr)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanBinaryMacaddrToTextScanner struct{}
+
+func (scanPlanBinaryMacaddrToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: net.HardwareAddr(src).String(), Valid: true})
+}
+
+type scanPlanTextMacaddrToHardwareAddr struct{}
+
+func (scanPlanTextMacaddrToHardwareAddr) Scan(src []byte, dst any) error {
+ p := dst.(*net.HardwareAddr)
+
+ if src == nil {
+ *p = nil
+ return nil
+ }
+
+ addr, err := net.ParseMAC(string(src))
+ if err != nil {
+ return err
+ }
+
+ *p = addr
+
+ return nil
+}
+
+func (c MacaddrCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c MacaddrCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var addr net.HardwareAddr
+ err := codecScan(c, m, oid, format, src, &addr)
+ if err != nil {
+ return nil, err
+ }
+ return addr, nil
+}
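
Usage sketch for the macaddr codec: columns scan directly into net.HardwareAddr via the plans above (connection string illustrative).

package main

import (
	"context"
	"fmt"
	"net"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	var addr net.HardwareAddr
	if err := conn.QueryRow(ctx, `select '08:00:2b:01:02:03'::macaddr`).Scan(&addr); err != nil {
		panic(err)
	}
	fmt.Println(addr.String()) // 08:00:2b:01:02:03
}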
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go b/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go
new file mode 100644
index 0000000..e576378
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/multirange.go
@@ -0,0 +1,443 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "reflect"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// MultirangeGetter is a type that can be converted into a PostgreSQL multirange.
+type MultirangeGetter interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // Len returns the number of elements in the multirange.
+ Len() int
+
+ // Index returns the element at i.
+ Index(i int) any
+
+ // IndexType returns a non-nil scan target of the type Index will return. This is used by MultirangeCodec.PlanEncode.
+ IndexType() any
+}
+
+// MultirangeSetter is a type that can be set from a PostgreSQL multirange.
+type MultirangeSetter interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // SetLen prepares the value such that ScanIndex can be called for each element. This will remove any existing
+ // elements.
+ SetLen(n int) error
+
+ // ScanIndex returns a value usable as a scan target for i. SetLen must be called before ScanIndex.
+ ScanIndex(i int) any
+
+ // ScanIndexType returns a non-nil scan target of the type ScanIndex will return. This is used by
+ // MultirangeCodec.PlanScan.
+ ScanIndexType() any
+}
+
+// MultirangeCodec is a codec for any multirange type.
+type MultirangeCodec struct {
+ ElementType *Type
+}
+
+func (c *MultirangeCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *MultirangeCodec) PreferredFormat() int16 {
+ return c.ElementType.Codec.PreferredFormat()
+}
+
+func (c *MultirangeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ multirangeValuer, ok := value.(MultirangeGetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := multirangeValuer.IndexType()
+
+ elementEncodePlan := m.PlanEncode(c.ElementType.OID, format, elementType)
+ if elementEncodePlan == nil {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanMultirangeCodecBinary{ac: c, m: m, oid: oid}
+ case TextFormatCode:
+ return &encodePlanMultirangeCodecText{ac: c, m: m, oid: oid}
+ }
+
+ return nil
+}
+
+type encodePlanMultirangeCodecText struct {
+ ac *MultirangeCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanMultirangeCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ multirange := value.(MultirangeGetter)
+
+ if multirange.IsNull() {
+ return nil, nil
+ }
+
+ elementCount := multirange.Len()
+
+ buf = append(buf, '{')
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ inElemBuf := make([]byte, 0, 32)
+ for i := 0; i < elementCount; i++ {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ elem := multirange.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, TextFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", multirange.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ return nil, fmt.Errorf("multirange cannot contain NULL element")
+ } else {
+ buf = append(buf, elemBuf...)
+ }
+ }
+
+ buf = append(buf, '}')
+
+ return buf, nil
+}
+
+type encodePlanMultirangeCodecBinary struct {
+ ac *MultirangeCodec
+ m *Map
+ oid uint32
+}
+
+func (p *encodePlanMultirangeCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ multirange := value.(MultirangeGetter)
+
+ if multirange.IsNull() {
+ return nil, nil
+ }
+
+ elementCount := multirange.Len()
+
+ buf = pgio.AppendInt32(buf, int32(elementCount))
+
+ var encodePlan EncodePlan
+ var lastElemType reflect.Type
+ for i := 0; i < elementCount; i++ {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elem := multirange.Index(i)
+ var elemBuf []byte
+ if elem != nil {
+ elemType := reflect.TypeOf(elem)
+ if lastElemType != elemType {
+ lastElemType = elemType
+ encodePlan = p.m.PlanEncode(p.ac.ElementType.OID, BinaryFormatCode, elem)
+ if encodePlan == nil {
+ return nil, fmt.Errorf("unable to encode %v", multirange.Index(i))
+ }
+ }
+ elemBuf, err = encodePlan.Encode(elem, buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if elemBuf == nil {
+ return nil, fmt.Errorf("multirange cannot contain NULL element")
+ } else {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
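
The binary encoder above relies on a length back-patching pattern: append a 4-byte -1 placeholder, encode the element directly into the buffer, then overwrite the placeholder with the element's actual length. A self-contained sketch of the same pattern, with the two pgio helpers re-implemented via encoding/binary for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendInt32 and setInt32 mirror pgio.AppendInt32 and pgio.SetInt32.
func appendInt32(buf []byte, n int32) []byte {
	return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}

func setInt32(buf []byte, n int32) {
	binary.BigEndian.PutUint32(buf, uint32(n))
}

func main() {
	var buf []byte

	sp := len(buf)                // remember where the length field starts
	buf = appendInt32(buf, -1)    // placeholder length
	buf = append(buf, "[3,7)"...) // stand-in for an encoded range element

	// Back-patch: bytes written after sp, minus the 4-byte length field itself.
	setInt32(buf[sp:], int32(len(buf[sp:])-4))

	fmt.Printf("% x\n", buf) // 00 00 00 05 5b 33 2c 37 29
}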
+
+func (c *MultirangeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ multirangeScanner, ok := target.(MultirangeSetter)
+ if !ok {
+ return nil
+ }
+
+ elementType := multirangeScanner.ScanIndexType()
+
+ elementScanPlan := m.PlanScan(c.ElementType.OID, format, elementType)
+ if _, ok := elementScanPlan.(*scanPlanFail); ok {
+ return nil
+ }
+
+ return &scanPlanMultirangeCodec{
+ multirangeCodec: c,
+ m: m,
+ oid: oid,
+ formatCode: format,
+ }
+}
+
+func (c *MultirangeCodec) decodeBinary(m *Map, multirangeOID uint32, src []byte, multirange MultirangeSetter) error {
+ rp := 0
+
+ elementCount := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ err := multirange.SetLen(elementCount)
+ if err != nil {
+ return err
+ }
+
+ if elementCount == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, BinaryFormatCode, multirange.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, BinaryFormatCode, multirange.ScanIndex(0))
+ }
+
+ for i := 0; i < elementCount; i++ {
+ elem := multirange.ScanIndex(i)
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elementScanPlan.Scan(elemSrc, elem)
+ if err != nil {
+ return fmt.Errorf("failed to scan multirange element %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (c *MultirangeCodec) decodeText(m *Map, multirangeOID uint32, src []byte, multirange MultirangeSetter) error {
+ elements, err := parseUntypedTextMultirange(src)
+ if err != nil {
+ return err
+ }
+
+ err = multirange.SetLen(len(elements))
+ if err != nil {
+ return err
+ }
+
+ if len(elements) == 0 {
+ return nil
+ }
+
+ elementScanPlan := c.ElementType.Codec.PlanScan(m, c.ElementType.OID, TextFormatCode, multirange.ScanIndex(0))
+ if elementScanPlan == nil {
+ elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, multirange.ScanIndex(0))
+ }
+
+ for i, s := range elements {
+ elem := multirange.ScanIndex(i)
+ err = elementScanPlan.Scan([]byte(s), elem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type scanPlanMultirangeCodec struct {
+ multirangeCodec *MultirangeCodec
+ m *Map
+ oid uint32
+ formatCode int16
+ elementScanPlan ScanPlan
+}
+
+func (spac *scanPlanMultirangeCodec) Scan(src []byte, dst any) error {
+ c := spac.multirangeCodec
+ m := spac.m
+ oid := spac.oid
+ formatCode := spac.formatCode
+
+ multirange := dst.(MultirangeSetter)
+
+ if src == nil {
+ return multirange.ScanNull()
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ return c.decodeBinary(m, oid, src, multirange)
+ case TextFormatCode:
+ return c.decodeText(m, oid, src, multirange)
+ default:
+ return fmt.Errorf("unknown format code %d", formatCode)
+ }
+}
+
+func (c *MultirangeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *MultirangeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var multirange Multirange[Range[any]]
+ err := m.PlanScan(oid, format, &multirange).Scan(src, &multirange)
+ return multirange, err
+}
+
+func parseUntypedTextMultirange(src []byte) ([]string, error) {
+ elements := make([]string, 0)
+
+ buf := bytes.NewBuffer(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid array: %w", err)
+ }
+
+ if r != '{' {
+ return nil, fmt.Errorf("invalid multirange, expected '{' got %v", r)
+ }
+
+parseValueLoop:
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid multirange: %w", err)
+ }
+
+ switch r {
+ case ',': // skip range separator
+ case '}':
+ break parseValueLoop
+ default:
+ buf.UnreadRune()
+ value, err := parseRange(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid multirange value: %w", err)
+ }
+ elements = append(elements, value)
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ return elements, nil
+}
+
+func parseRange(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ boundSepRead := false
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case ',', '}':
+ if r == ',' && !boundSepRead {
+ boundSepRead = true
+ break
+ }
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+// Multirange is a generic multirange type.
+//
+// T should implement RangeValuer and *T should implement RangeScanner. However, there does not appear to be a way to
+// enforce the RangeScanner constraint.
+type Multirange[T RangeValuer] []T
+
+func (r Multirange[T]) IsNull() bool {
+ return r == nil
+}
+
+func (r Multirange[T]) Len() int {
+ return len(r)
+}
+
+func (r Multirange[T]) Index(i int) any {
+ return r[i]
+}
+
+func (r Multirange[T]) IndexType() any {
+ var zero T
+ return zero
+}
+
+func (r *Multirange[T]) ScanNull() error {
+ *r = nil
+ return nil
+}
+
+func (r *Multirange[T]) SetLen(n int) error {
+ *r = make([]T, n)
+ return nil
+}
+
+func (r Multirange[T]) ScanIndex(i int) any {
+ return &r[i]
+}
+
+func (r Multirange[T]) ScanIndexType() any {
+ return new(T)
+}
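
A usage sketch for the generic Multirange type: because Multirange[Range[Int4]] implements both MultirangeGetter and MultirangeSetter, it can be used directly as a scan target. This assumes a reachable PostgreSQL server; the connection string is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://localhost/postgres") // placeholder DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	var mr pgtype.Multirange[pgtype.Range[pgtype.Int4]]
	err = conn.QueryRow(ctx,
		"select int4multirange(int4range(1, 5), int4range(10, 20))",
	).Scan(&mr)
	if err != nil {
		panic(err)
	}
	fmt.Println(mr.Len()) // 2
}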
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go b/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go
new file mode 100644
index 0000000..4dbec78
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/numeric.go
@@ -0,0 +1,823 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// PostgreSQL internal numeric storage uses 16-bit "digits" with base of 10,000
+const nbase = 10000
+
+const (
+ pgNumericNaN = 0x00000000c0000000
+ pgNumericNaNSign = 0xc000
+
+ pgNumericPosInf = 0x00000000d0000000
+ pgNumericPosInfSign = 0xd000
+
+ pgNumericNegInf = 0x00000000f0000000
+ pgNumericNegInfSign = 0xf000
+)
+
+var big0 *big.Int = big.NewInt(0)
+var big1 *big.Int = big.NewInt(1)
+var big10 *big.Int = big.NewInt(10)
+var big100 *big.Int = big.NewInt(100)
+var big1000 *big.Int = big.NewInt(1000)
+
+var bigNBase *big.Int = big.NewInt(nbase)
+var bigNBaseX2 *big.Int = big.NewInt(nbase * nbase)
+var bigNBaseX3 *big.Int = big.NewInt(nbase * nbase * nbase)
+var bigNBaseX4 *big.Int = big.NewInt(nbase * nbase * nbase * nbase)
+
+type NumericScanner interface {
+ ScanNumeric(v Numeric) error
+}
+
+type NumericValuer interface {
+ NumericValue() (Numeric, error)
+}
+
+type Numeric struct {
+ Int *big.Int
+ Exp int32
+ NaN bool
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (n *Numeric) ScanNumeric(v Numeric) error {
+ *n = v
+ return nil
+}
+
+func (n Numeric) NumericValue() (Numeric, error) {
+ return n, nil
+}
+
+func (n Numeric) Float64Value() (Float8, error) {
+ if !n.Valid {
+ return Float8{}, nil
+ } else if n.NaN {
+ return Float8{Float64: math.NaN(), Valid: true}, nil
+ } else if n.InfinityModifier == Infinity {
+ return Float8{Float64: math.Inf(1), Valid: true}, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ return Float8{Float64: math.Inf(-1), Valid: true}, nil
+ }
+
+ buf := make([]byte, 0, 32)
+
+ if n.Int == nil {
+ buf = append(buf, '0')
+ } else {
+ buf = append(buf, n.Int.String()...)
+ }
+ buf = append(buf, 'e')
+ buf = append(buf, strconv.FormatInt(int64(n.Exp), 10)...)
+
+ f, err := strconv.ParseFloat(string(buf), 64)
+ if err != nil {
+ return Float8{}, err
+ }
+
+ return Float8{Float64: f, Valid: true}, nil
+}
+
+func (n *Numeric) ScanInt64(v Int8) error {
+ if !v.Valid {
+ *n = Numeric{}
+ return nil
+ }
+
+ *n = Numeric{Int: big.NewInt(v.Int64), Valid: true}
+ return nil
+}
+
+func (n Numeric) Int64Value() (Int8, error) {
+ if !n.Valid {
+ return Int8{}, nil
+ }
+
+ bi, err := n.toBigInt()
+ if err != nil {
+ return Int8{}, err
+ }
+
+ if !bi.IsInt64() {
+ return Int8{}, fmt.Errorf("cannot convert %v to int64", n)
+ }
+
+ return Int8{Int64: bi.Int64(), Valid: true}, nil
+}
+
+func (n *Numeric) ScanScientific(src string) error {
+	if !strings.ContainsAny(src, "eE") {
+ return scanPlanTextAnyToNumericScanner{}.Scan([]byte(src), n)
+ }
+
+	if bigF, ok := new(big.Float).SetString(src); ok {
+ smallF, _ := bigF.Float64()
+ src = strconv.FormatFloat(smallF, 'f', -1, 64)
+ }
+
+ num, exp, err := parseNumericString(src)
+ if err != nil {
+ return err
+ }
+
+ *n = Numeric{Int: num, Exp: exp, Valid: true}
+
+ return nil
+}
+
+func (n *Numeric) toBigInt() (*big.Int, error) {
+ if n.Exp == 0 {
+ return n.Int, nil
+ }
+
+ num := &big.Int{}
+ num.Set(n.Int)
+ if n.Exp > 0 {
+ mul := &big.Int{}
+ mul.Exp(big10, big.NewInt(int64(n.Exp)), nil)
+ num.Mul(num, mul)
+ return num, nil
+ }
+
+ div := &big.Int{}
+ div.Exp(big10, big.NewInt(int64(-n.Exp)), nil)
+ remainder := &big.Int{}
+ num.DivMod(num, div, remainder)
+ if remainder.Cmp(big0) != 0 {
+ return nil, fmt.Errorf("cannot convert %v to integer", n)
+ }
+ return num, nil
+}
+
+func parseNumericString(str string) (n *big.Int, exp int32, err error) {
+ idx := strings.IndexByte(str, '.')
+
+ if idx == -1 {
+ for len(str) > 1 && str[len(str)-1] == '0' && str[len(str)-2] != '-' {
+ str = str[:len(str)-1]
+ exp++
+ }
+ } else {
+ exp = int32(-(len(str) - idx - 1))
+ str = str[:idx] + str[idx+1:]
+ }
+
+ accum := &big.Int{}
+ if _, ok := accum.SetString(str, 10); !ok {
+ return nil, 0, fmt.Errorf("%s is not a number", str)
+ }
+
+ return accum, exp, nil
+}
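
The (Int, Exp) pair produced by parseNumericString is decimal scientific notation: the value is Int × 10^Exp, with trailing zeros folded into a positive exponent. A sketch using only the exported API (Value goes through the text encoding defined later in this file):

package main

import (
	"fmt"
	"math/big"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// "1.23" parses to Int=123, Exp=-2; "1200" normalizes to Int=12, Exp=2.
	a := pgtype.Numeric{Int: big.NewInt(123), Exp: -2, Valid: true}
	b := pgtype.Numeric{Int: big.NewInt(12), Exp: 2, Valid: true}

	av, _ := a.Value()
	bv, _ := b.Value()
	fmt.Println(av, bv) // 1.23 1200
}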
+
+func nbaseDigitsToInt64(src []byte) (accum int64, bytesRead, digitsRead int) {
+ digits := len(src) / 2
+ if digits > 4 {
+ digits = 4
+ }
+
+ rp := 0
+
+ for i := 0; i < digits; i++ {
+ if i > 0 {
+ accum *= nbase
+ }
+ accum += int64(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return accum, rp, digits
+}
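
Each 16-bit digit carries a value in [0, 9999], so a digit sequence reads as base-10,000 positional notation. A worked sketch of the same accumulation over hand-built wire bytes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two base-10,000 digits, 1 and 2345: 1*10000 + 2345 = 12345.
	src := make([]byte, 4)
	binary.BigEndian.PutUint16(src[0:], 1)
	binary.BigEndian.PutUint16(src[2:], 2345)

	var accum int64
	for rp := 0; rp < len(src); rp += 2 {
		accum = accum*10000 + int64(binary.BigEndian.Uint16(src[rp:]))
	}
	fmt.Println(accum) // 12345
}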
+
+// Scan implements the database/sql Scanner interface.
+func (n *Numeric) Scan(src any) error {
+ if src == nil {
+ *n = Numeric{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToNumericScanner{}.Scan([]byte(src), n)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (n Numeric) Value() (driver.Value, error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ buf, err := NumericCodec{}.PlanEncode(nil, 0, TextFormatCode, n).Encode(n, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+func (n Numeric) MarshalJSON() ([]byte, error) {
+ if !n.Valid {
+ return []byte("null"), nil
+ }
+
+ if n.NaN {
+ return []byte(`"NaN"`), nil
+ }
+
+ return n.numberTextBytes(), nil
+}
+
+func (n *Numeric) UnmarshalJSON(src []byte) error {
+ if bytes.Equal(src, []byte(`null`)) {
+ *n = Numeric{}
+ return nil
+ }
+ if bytes.Equal(src, []byte(`"NaN"`)) {
+ *n = Numeric{NaN: true, Valid: true}
+ return nil
+ }
+ return scanPlanTextAnyToNumericScanner{}.Scan(src, n)
+}
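
A short sketch of the JSON round trip, including the quoted "NaN" special case (error handling elided for brevity):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var n pgtype.Numeric

	_ = json.Unmarshal([]byte(`12.5`), &n)
	out, _ := json.Marshal(n)
	fmt.Println(string(out)) // 12.5

	_ = json.Unmarshal([]byte(`"NaN"`), &n)
	out, _ = json.Marshal(n)
	fmt.Println(string(out)) // "NaN"

	_ = json.Unmarshal([]byte(`null`), &n)
	out, _ = json.Marshal(n)
	fmt.Println(string(out)) // null
}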
+
+// numberTextBytes returns the text form of the number. The result is undefined if the value is NaN, infinite, or NULL.
+func (n Numeric) numberTextBytes() []byte {
+ intStr := n.Int.String()
+
+ buf := &bytes.Buffer{}
+
+ if len(intStr) > 0 && intStr[:1] == "-" {
+ intStr = intStr[1:]
+ buf.WriteByte('-')
+ }
+
+ exp := int(n.Exp)
+ if exp > 0 {
+ buf.WriteString(intStr)
+ for i := 0; i < exp; i++ {
+ buf.WriteByte('0')
+ }
+ } else if exp < 0 {
+ if len(intStr) <= -exp {
+ buf.WriteString("0.")
+ leadingZeros := -exp - len(intStr)
+ for i := 0; i < leadingZeros; i++ {
+ buf.WriteByte('0')
+ }
+ buf.WriteString(intStr)
+ } else if len(intStr) > -exp {
+ dpPos := len(intStr) + exp
+ buf.WriteString(intStr[:dpPos])
+ buf.WriteByte('.')
+ buf.WriteString(intStr[dpPos:])
+ }
+ } else {
+ buf.WriteString(intStr)
+ }
+
+ return buf.Bytes()
+}
+
+type NumericCodec struct{}
+
+func (NumericCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (NumericCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (NumericCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case NumericValuer:
+ return encodePlanNumericCodecBinaryNumericValuer{}
+ case Float64Valuer:
+ return encodePlanNumericCodecBinaryFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanNumericCodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case NumericValuer:
+ return encodePlanNumericCodecTextNumericValuer{}
+ case Float64Valuer:
+ return encodePlanNumericCodecTextFloat64Valuer{}
+ case Int64Valuer:
+ return encodePlanNumericCodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanNumericCodecBinaryNumericValuer struct{}
+
+func (encodePlanNumericCodecBinaryNumericValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(NumericValuer).NumericValue()
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericBinary(n, buf)
+}
+
+type encodePlanNumericCodecBinaryFloat64Valuer struct{}
+
+func (encodePlanNumericCodecBinaryFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if math.IsNaN(n.Float64) {
+ return encodeNumericBinary(Numeric{NaN: true, Valid: true}, buf)
+ } else if math.IsInf(n.Float64, 1) {
+ return encodeNumericBinary(Numeric{InfinityModifier: Infinity, Valid: true}, buf)
+ } else if math.IsInf(n.Float64, -1) {
+ return encodeNumericBinary(Numeric{InfinityModifier: NegativeInfinity, Valid: true}, buf)
+ }
+ num, exp, err := parseNumericString(strconv.FormatFloat(n.Float64, 'f', -1, 64))
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericBinary(Numeric{Int: num, Exp: exp, Valid: true}, buf)
+}
+
+type encodePlanNumericCodecBinaryInt64Valuer struct{}
+
+func (encodePlanNumericCodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ return encodeNumericBinary(Numeric{Int: big.NewInt(n.Int64), Valid: true}, buf)
+}
+
+func encodeNumericBinary(n Numeric, buf []byte) (newBuf []byte, err error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.NaN {
+ buf = pgio.AppendUint64(buf, pgNumericNaN)
+ return buf, nil
+ } else if n.InfinityModifier == Infinity {
+ buf = pgio.AppendUint64(buf, pgNumericPosInf)
+ return buf, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ buf = pgio.AppendUint64(buf, pgNumericNegInf)
+ return buf, nil
+ }
+
+ var sign int16
+ if n.Int.Cmp(big0) < 0 {
+ sign = 16384
+ }
+
+ absInt := &big.Int{}
+ wholePart := &big.Int{}
+ fracPart := &big.Int{}
+ remainder := &big.Int{}
+ absInt.Abs(n.Int)
+
+ // Normalize absInt and exp to where exp is always a multiple of 4. This makes
+ // converting to 16-bit base 10,000 digits easier.
+ var exp int32
+ switch n.Exp % 4 {
+ case 1, -3:
+ exp = n.Exp - 1
+ absInt.Mul(absInt, big10)
+ case 2, -2:
+ exp = n.Exp - 2
+ absInt.Mul(absInt, big100)
+ case 3, -1:
+ exp = n.Exp - 3
+ absInt.Mul(absInt, big1000)
+ default:
+ exp = n.Exp
+ }
+
+ if exp < 0 {
+ divisor := &big.Int{}
+ divisor.Exp(big10, big.NewInt(int64(-exp)), nil)
+ wholePart.DivMod(absInt, divisor, fracPart)
+ fracPart.Add(fracPart, divisor)
+ } else {
+ wholePart = absInt
+ }
+
+ var wholeDigits, fracDigits []int16
+
+ for wholePart.Cmp(big0) != 0 {
+ wholePart.DivMod(wholePart, bigNBase, remainder)
+ wholeDigits = append(wholeDigits, int16(remainder.Int64()))
+ }
+
+ if fracPart.Cmp(big0) != 0 {
+ for fracPart.Cmp(big1) != 0 {
+ fracPart.DivMod(fracPart, bigNBase, remainder)
+ fracDigits = append(fracDigits, int16(remainder.Int64()))
+ }
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(wholeDigits)+len(fracDigits)))
+
+ var weight int16
+ if len(wholeDigits) > 0 {
+ weight = int16(len(wholeDigits) - 1)
+ if exp > 0 {
+ weight += int16(exp / 4)
+ }
+ } else {
+ weight = int16(exp/4) - 1 + int16(len(fracDigits))
+ }
+ buf = pgio.AppendInt16(buf, weight)
+
+ buf = pgio.AppendInt16(buf, sign)
+
+ var dscale int16
+ if n.Exp < 0 {
+ dscale = int16(-n.Exp)
+ }
+ buf = pgio.AppendInt16(buf, dscale)
+
+ for i := len(wholeDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, wholeDigits[i])
+ }
+
+ for i := len(fracDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, fracDigits[i])
+ }
+
+ return buf, nil
+}
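
The first four int16 fields of the binary wire format are ndigits, weight, sign, and dscale; the base-10,000 digits follow. A sketch that encodes a Numeric and reads the header back; passing a nil Map is acceptable here because this particular encode plan never consults it:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	n := pgtype.Numeric{Int: big.NewInt(-12345678), Exp: -3, Valid: true} // -12345.678

	plan := pgtype.NumericCodec{}.PlanEncode(nil, pgtype.NumericOID, pgtype.BinaryFormatCode, n)
	buf, err := plan.Encode(n, nil)
	if err != nil {
		panic(err)
	}

	ndigits := binary.BigEndian.Uint16(buf[0:])
	weight := int16(binary.BigEndian.Uint16(buf[2:]))
	sign := binary.BigEndian.Uint16(buf[4:])
	dscale := int16(binary.BigEndian.Uint16(buf[6:]))

	// Digits are {1, 2345, 6780} with weight 1: 1*10000^1 + 2345 + 6780/10000.
	fmt.Println(ndigits, weight, sign, dscale) // 3 1 16384 3
}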
+
+type encodePlanNumericCodecTextNumericValuer struct{}
+
+func (encodePlanNumericCodecTextNumericValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(NumericValuer).NumericValue()
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeNumericText(n, buf)
+}
+
+type encodePlanNumericCodecTextFloat64Valuer struct{}
+
+func (encodePlanNumericCodecTextFloat64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Float64Valuer).Float64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if math.IsNaN(n.Float64) {
+ buf = append(buf, "NaN"...)
+ } else if math.IsInf(n.Float64, 1) {
+ buf = append(buf, "Infinity"...)
+ } else if math.IsInf(n.Float64, -1) {
+ buf = append(buf, "-Infinity"...)
+ } else {
+ buf = append(buf, strconv.FormatFloat(n.Float64, 'f', -1, 64)...)
+ }
+ return buf, nil
+}
+
+type encodePlanNumericCodecTextInt64Valuer struct{}
+
+func (encodePlanNumericCodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ n, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !n.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, strconv.FormatInt(n.Int64, 10)...)
+ return buf, nil
+}
+
+func encodeNumericText(n Numeric, buf []byte) (newBuf []byte, err error) {
+ if !n.Valid {
+ return nil, nil
+ }
+
+ if n.NaN {
+ buf = append(buf, "NaN"...)
+ return buf, nil
+ } else if n.InfinityModifier == Infinity {
+ buf = append(buf, "Infinity"...)
+ return buf, nil
+ } else if n.InfinityModifier == NegativeInfinity {
+ buf = append(buf, "-Infinity"...)
+ return buf, nil
+ }
+
+ buf = append(buf, n.numberTextBytes()...)
+
+ return buf, nil
+}
+
+func (NumericCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case NumericScanner:
+ return scanPlanBinaryNumericToNumericScanner{}
+ case Float64Scanner:
+ return scanPlanBinaryNumericToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanBinaryNumericToInt64Scanner{}
+ case TextScanner:
+ return scanPlanBinaryNumericToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case NumericScanner:
+ return scanPlanTextAnyToNumericScanner{}
+ case Float64Scanner:
+ return scanPlanTextAnyToFloat64Scanner{}
+ case Int64Scanner:
+ return scanPlanTextAnyToInt64Scanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryNumericToNumericScanner struct{}
+
+func (scanPlanBinaryNumericToNumericScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NumericScanner)
+
+ if src == nil {
+ return scanner.ScanNumeric(Numeric{})
+ }
+
+ if len(src) < 8 {
+ return fmt.Errorf("numeric incomplete %v", src)
+ }
+
+ rp := 0
+ ndigits := binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ weight := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ sign := binary.BigEndian.Uint16(src[rp:])
+ rp += 2
+ dscale := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if sign == pgNumericNaNSign {
+ return scanner.ScanNumeric(Numeric{NaN: true, Valid: true})
+ } else if sign == pgNumericPosInfSign {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: Infinity, Valid: true})
+ } else if sign == pgNumericNegInfSign {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: NegativeInfinity, Valid: true})
+ }
+
+ if ndigits == 0 {
+ return scanner.ScanNumeric(Numeric{Int: big.NewInt(0), Valid: true})
+ }
+
+ if len(src[rp:]) < int(ndigits)*2 {
+ return fmt.Errorf("numeric incomplete %v", src)
+ }
+
+ accum := &big.Int{}
+
+ for i := 0; i < int(ndigits+3)/4; i++ {
+ int64accum, bytesRead, digitsRead := nbaseDigitsToInt64(src[rp:])
+ rp += bytesRead
+
+ if i > 0 {
+ var mul *big.Int
+ switch digitsRead {
+ case 1:
+ mul = bigNBase
+ case 2:
+ mul = bigNBaseX2
+ case 3:
+ mul = bigNBaseX3
+ case 4:
+ mul = bigNBaseX4
+ default:
+ return fmt.Errorf("invalid digitsRead: %d (this can't happen)", digitsRead)
+ }
+ accum.Mul(accum, mul)
+ }
+
+ accum.Add(accum, big.NewInt(int64accum))
+ }
+
+ exp := (int32(weight) - int32(ndigits) + 1) * 4
+
+ if dscale > 0 {
+ fracNBaseDigits := int16(int32(ndigits) - int32(weight) - 1)
+ fracDecimalDigits := fracNBaseDigits * 4
+
+ if dscale > fracDecimalDigits {
+ multCount := int(dscale - fracDecimalDigits)
+ for i := 0; i < multCount; i++ {
+ accum.Mul(accum, big10)
+ exp--
+ }
+ } else if dscale < fracDecimalDigits {
+ divCount := int(fracDecimalDigits - dscale)
+ for i := 0; i < divCount; i++ {
+ accum.Div(accum, big10)
+ exp++
+ }
+ }
+ }
+
+ reduced := &big.Int{}
+ remainder := &big.Int{}
+ if exp >= 0 {
+ for {
+ reduced.DivMod(accum, big10, remainder)
+ if remainder.Cmp(big0) != 0 {
+ break
+ }
+ accum.Set(reduced)
+ exp++
+ }
+ }
+
+ if sign != 0 {
+ accum.Neg(accum)
+ }
+
+ return scanner.ScanNumeric(Numeric{Int: accum, Exp: exp, Valid: true})
+}
+
+type scanPlanBinaryNumericToFloat64Scanner struct{}
+
+func (scanPlanBinaryNumericToFloat64Scanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(Float64Scanner)
+
+ if src == nil {
+ return scanner.ScanFloat64(Float8{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ f8, err := n.Float64Value()
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanFloat64(f8)
+}
+
+type scanPlanBinaryNumericToInt64Scanner struct{}
+
+func (scanPlanBinaryNumericToInt64Scanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(Int64Scanner)
+
+ if src == nil {
+ return scanner.ScanInt64(Int8{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ bigInt, err := n.toBigInt()
+ if err != nil {
+ return err
+ }
+
+ if !bigInt.IsInt64() {
+ return fmt.Errorf("%v is out of range for int64", bigInt)
+ }
+
+ return scanner.ScanInt64(Int8{Int64: bigInt.Int64(), Valid: true})
+}
+
+type scanPlanBinaryNumericToTextScanner struct{}
+
+func (scanPlanBinaryNumericToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ var n Numeric
+
+ err := scanPlanBinaryNumericToNumericScanner{}.Scan(src, &n)
+ if err != nil {
+ return err
+ }
+
+ sbuf, err := encodeNumericText(n, nil)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanText(Text{String: string(sbuf), Valid: true})
+}
+
+type scanPlanTextAnyToNumericScanner struct{}
+
+func (scanPlanTextAnyToNumericScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(NumericScanner)
+
+ if src == nil {
+ return scanner.ScanNumeric(Numeric{})
+ }
+
+ if string(src) == "NaN" {
+ return scanner.ScanNumeric(Numeric{NaN: true, Valid: true})
+ } else if string(src) == "Infinity" {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: Infinity, Valid: true})
+ } else if string(src) == "-Infinity" {
+ return scanner.ScanNumeric(Numeric{InfinityModifier: NegativeInfinity, Valid: true})
+ }
+
+ num, exp, err := parseNumericString(string(src))
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanNumeric(Numeric{Int: num, Exp: exp, Valid: true})
+}
+
+func (c NumericCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if format == TextFormatCode {
+ return string(src), nil
+ }
+
+ var n Numeric
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+
+ buf, err := m.Encode(oid, TextFormatCode, n, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), nil
+}
+
+func (c NumericCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n Numeric
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/path.go b/vendor/github.com/jackc/pgx/v5/pgtype/path.go
new file mode 100644
index 0000000..73e0ec5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/path.go
@@ -0,0 +1,272 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type PathScanner interface {
+ ScanPath(v Path) error
+}
+
+type PathValuer interface {
+ PathValue() (Path, error)
+}
+
+type Path struct {
+ P []Vec2
+ Closed bool
+ Valid bool
+}
+
+func (path *Path) ScanPath(v Path) error {
+ *path = v
+ return nil
+}
+
+func (path Path) PathValue() (Path, error) {
+ return path, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (path *Path) Scan(src any) error {
+ if src == nil {
+ *path = Path{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPathScanner{}.Scan([]byte(src), path)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (path Path) Value() (driver.Value, error) {
+ if !path.Valid {
+ return nil, nil
+ }
+
+ buf, err := PathCodec{}.PlanEncode(nil, 0, TextFormatCode, path).Encode(path, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), err
+}
+
+type PathCodec struct{}
+
+func (PathCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PathCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PathCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PathValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPathCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPathCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPathCodecBinary struct{}
+
+func (encodePlanPathCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ path, err := value.(PathValuer).PathValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !path.Valid {
+ return nil, nil
+ }
+
+ var closeByte byte
+ if path.Closed {
+ closeByte = 1
+ }
+ buf = append(buf, closeByte)
+
+ buf = pgio.AppendInt32(buf, int32(len(path.P)))
+
+ for _, p := range path.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+type encodePlanPathCodecText struct{}
+
+func (encodePlanPathCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ path, err := value.(PathValuer).PathValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !path.Valid {
+ return nil, nil
+ }
+
+ var startByte, endByte byte
+ if path.Closed {
+ startByte = '('
+ endByte = ')'
+ } else {
+ startByte = '['
+ endByte = ']'
+ }
+ buf = append(buf, startByte)
+
+ for i, p := range path.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(p.X, 'f', -1, 64),
+ strconv.FormatFloat(p.Y, 'f', -1, 64),
+ )...)
+ }
+
+ buf = append(buf, endByte)
+
+ return buf, nil
+}
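
The text encoder brackets open paths with [ ] and closed paths with ( ). A quick sketch via the driver Valuer:

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	pts := []pgtype.Vec2{{X: 1, Y: 2}, {X: 3.5, Y: 4}}

	open := pgtype.Path{P: pts, Closed: false, Valid: true}
	closed := pgtype.Path{P: pts, Closed: true, Valid: true}

	ov, _ := open.Value()
	cv, _ := closed.Value()
	fmt.Println(ov) // [(1,2),(3.5,4)]
	fmt.Println(cv) // ((1,2),(3.5,4))
}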
+
+func (PathCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PathScanner:
+ return scanPlanBinaryPathToPathScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PathScanner:
+ return scanPlanTextAnyToPathScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryPathToPathScanner struct{}
+
+func (scanPlanBinaryPathToPathScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PathScanner)
+
+ if src == nil {
+ return scanner.ScanPath(Path{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == 1
+ pointCount := int(binary.BigEndian.Uint32(src[1:]))
+
+ rp := 5
+
+ if 5+pointCount*16 != len(src) {
+ return fmt.Errorf("invalid length for Path with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ return scanner.ScanPath(Path{
+ P: points,
+ Closed: closed,
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPathScanner struct{}
+
+func (scanPlanTextAnyToPathScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PathScanner)
+
+ if src == nil {
+ return scanner.ScanPath(Path{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == '('
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ return scanner.ScanPath(Path{P: points, Closed: closed, Valid: true})
+}
+
+func (c PathCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PathCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var path Path
+ err := codecScan(c, m, oid, format, src, &path)
+ if err != nil {
+ return nil, err
+ }
+ return path, nil
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
new file mode 100644
index 0000000..4082956
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
@@ -0,0 +1,2031 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "reflect"
+ "time"
+)
+
+// PostgreSQL oids for common types
+const (
+ BoolOID = 16
+ ByteaOID = 17
+ QCharOID = 18
+ NameOID = 19
+ Int8OID = 20
+ Int2OID = 21
+ Int4OID = 23
+ TextOID = 25
+ OIDOID = 26
+ TIDOID = 27
+ XIDOID = 28
+ CIDOID = 29
+ JSONOID = 114
+ JSONArrayOID = 199
+ PointOID = 600
+ LsegOID = 601
+ PathOID = 602
+ BoxOID = 603
+ PolygonOID = 604
+ LineOID = 628
+ LineArrayOID = 629
+ CIDROID = 650
+ CIDRArrayOID = 651
+ Float4OID = 700
+ Float8OID = 701
+ CircleOID = 718
+ CircleArrayOID = 719
+ UnknownOID = 705
+ Macaddr8OID = 774
+ MacaddrOID = 829
+ InetOID = 869
+ BoolArrayOID = 1000
+ QCharArrayOID = 1002
+ NameArrayOID = 1003
+ Int2ArrayOID = 1005
+ Int4ArrayOID = 1007
+ TextArrayOID = 1009
+ TIDArrayOID = 1010
+ ByteaArrayOID = 1001
+ XIDArrayOID = 1011
+ CIDArrayOID = 1012
+ BPCharArrayOID = 1014
+ VarcharArrayOID = 1015
+ Int8ArrayOID = 1016
+ PointArrayOID = 1017
+ LsegArrayOID = 1018
+ PathArrayOID = 1019
+ BoxArrayOID = 1020
+ Float4ArrayOID = 1021
+ Float8ArrayOID = 1022
+ PolygonArrayOID = 1027
+ OIDArrayOID = 1028
+ ACLItemOID = 1033
+ ACLItemArrayOID = 1034
+ MacaddrArrayOID = 1040
+ InetArrayOID = 1041
+ BPCharOID = 1042
+ VarcharOID = 1043
+ DateOID = 1082
+ TimeOID = 1083
+ TimestampOID = 1114
+ TimestampArrayOID = 1115
+ DateArrayOID = 1182
+ TimeArrayOID = 1183
+ TimestamptzOID = 1184
+ TimestamptzArrayOID = 1185
+ IntervalOID = 1186
+ IntervalArrayOID = 1187
+ NumericArrayOID = 1231
+ TimetzOID = 1266
+ TimetzArrayOID = 1270
+ BitOID = 1560
+ BitArrayOID = 1561
+ VarbitOID = 1562
+ VarbitArrayOID = 1563
+ NumericOID = 1700
+ RecordOID = 2249
+ RecordArrayOID = 2287
+ UUIDOID = 2950
+ UUIDArrayOID = 2951
+ JSONBOID = 3802
+ JSONBArrayOID = 3807
+ DaterangeOID = 3912
+ DaterangeArrayOID = 3913
+ Int4rangeOID = 3904
+ Int4rangeArrayOID = 3905
+ NumrangeOID = 3906
+ NumrangeArrayOID = 3907
+ TsrangeOID = 3908
+ TsrangeArrayOID = 3909
+ TstzrangeOID = 3910
+ TstzrangeArrayOID = 3911
+ Int8rangeOID = 3926
+ Int8rangeArrayOID = 3927
+ JSONPathOID = 4072
+ JSONPathArrayOID = 4073
+ Int4multirangeOID = 4451
+ NummultirangeOID = 4532
+ TsmultirangeOID = 4533
+ TstzmultirangeOID = 4534
+ DatemultirangeOID = 4535
+ Int8multirangeOID = 4536
+ Int4multirangeArrayOID = 6150
+ NummultirangeArrayOID = 6151
+ TsmultirangeArrayOID = 6152
+ TstzmultirangeArrayOID = 6153
+ DatemultirangeArrayOID = 6155
+ Int8multirangeArrayOID = 6157
+)
+
+type InfinityModifier int8
+
+const (
+ Infinity InfinityModifier = 1
+ Finite InfinityModifier = 0
+ NegativeInfinity InfinityModifier = -Infinity
+)
+
+func (im InfinityModifier) String() string {
+ switch im {
+ case Finite:
+ return "finite"
+ case Infinity:
+ return "infinity"
+ case NegativeInfinity:
+ return "-infinity"
+ default:
+ return "invalid"
+ }
+}
+
+// PostgreSQL format codes
+const (
+ TextFormatCode = 0
+ BinaryFormatCode = 1
+)
+
+// A Codec converts between Go and PostgreSQL values. A Codec must not be mutated after it is registered with a Map.
+type Codec interface {
+ // FormatSupported returns true if the format is supported.
+ FormatSupported(int16) bool
+
+ // PreferredFormat returns the preferred format.
+ PreferredFormat() int16
+
+ // PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+ // found then nil is returned.
+ PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan
+
+ // PlanScan returns a ScanPlan for scanning a PostgreSQL value into a destination with the same type as target. If
+ // no plan can be found then nil is returned.
+ PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan
+
+ // DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
+ DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error)
+
+ // DecodeValue returns src decoded into its default format.
+ DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error)
+}
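
To make the six-method contract concrete, here is a minimal text-only Codec sketch that treats the Go value as a plain string. It is an invented illustration, not a codec shipped with pgtype:

package main

import (
	"database/sql/driver"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

// rawTextCodec passes text-format bytes through unchanged. Hypothetical example.
type rawTextCodec struct{}

func (rawTextCodec) FormatSupported(format int16) bool { return format == pgtype.TextFormatCode }
func (rawTextCodec) PreferredFormat() int16            { return pgtype.TextFormatCode }

type rawTextEncodePlan struct{}

func (rawTextEncodePlan) Encode(value any, buf []byte) ([]byte, error) {
	// PlanEncode only returns this plan for string values, so the assertion is safe.
	return append(buf, value.(string)...), nil
}

func (rawTextCodec) PlanEncode(m *pgtype.Map, oid uint32, format int16, value any) pgtype.EncodePlan {
	if format != pgtype.TextFormatCode {
		return nil
	}
	if _, ok := value.(string); !ok {
		return nil
	}
	return rawTextEncodePlan{}
}

type rawTextScanPlan struct{}

func (rawTextScanPlan) Scan(src []byte, target any) error {
	if src == nil {
		return fmt.Errorf("cannot scan NULL into %T", target)
	}
	*(target.(*string)) = string(src)
	return nil
}

func (rawTextCodec) PlanScan(m *pgtype.Map, oid uint32, format int16, target any) pgtype.ScanPlan {
	if format != pgtype.TextFormatCode {
		return nil
	}
	if _, ok := target.(*string); !ok {
		return nil
	}
	return rawTextScanPlan{}
}

func (rawTextCodec) DecodeDatabaseSQLValue(m *pgtype.Map, oid uint32, format int16, src []byte) (driver.Value, error) {
	if src == nil {
		return nil, nil
	}
	return string(src), nil
}

func (rawTextCodec) DecodeValue(m *pgtype.Map, oid uint32, format int16, src []byte) (any, error) {
	if src == nil {
		return nil, nil
	}
	return string(src), nil
}

func main() {
	var _ pgtype.Codec = rawTextCodec{} // compile-time interface check
	fmt.Println("rawTextCodec satisfies pgtype.Codec")
}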
+
+type nullAssignmentError struct {
+ dst any
+}
+
+func (e *nullAssignmentError) Error() string {
+ return fmt.Sprintf("cannot assign NULL to %T", e.dst)
+}
+
+// Type represents a PostgreSQL data type. It must not be mutated after it is registered with a Map.
+type Type struct {
+ Codec Codec
+ Name string
+ OID uint32
+}
+
+// Map is the mapping between PostgreSQL server types and Go type handling logic. It can encode values for
+// transmission to a PostgreSQL server and scan received values.
+type Map struct {
+ oidToType map[uint32]*Type
+ nameToType map[string]*Type
+ reflectTypeToName map[reflect.Type]string
+ oidToFormatCode map[uint32]int16
+
+ reflectTypeToType map[reflect.Type]*Type
+
+ memoizedScanPlans map[uint32]map[reflect.Type][2]ScanPlan
+ memoizedEncodePlans map[uint32]map[reflect.Type][2]EncodePlan
+
+ // TryWrapEncodePlanFuncs is a slice of functions that will wrap a value that cannot be encoded by the Codec. Every
+ // time a wrapper is found the PlanEncode method will be recursively called with the new value. This allows several layers of wrappers
+ // to be built up. There are default functions placed in this slice by NewMap(). In most cases these functions
+	// should run last; that is, additional functions should typically be prepended, not appended.
+ TryWrapEncodePlanFuncs []TryWrapEncodePlanFunc
+
+ // TryWrapScanPlanFuncs is a slice of functions that will wrap a target that cannot be scanned into by the Codec. Every
+ // time a wrapper is found the PlanScan method will be recursively called with the new target. This allows several layers of wrappers
+ // to be built up. There are default functions placed in this slice by NewMap(). In most cases these functions
+	// should run last; that is, additional functions should typically be prepended, not appended.
+ TryWrapScanPlanFuncs []TryWrapScanPlanFunc
+}
+
+func NewMap() *Map {
+ defaultMapInitOnce.Do(initDefaultMap)
+
+ return &Map{
+ oidToType: make(map[uint32]*Type),
+ nameToType: make(map[string]*Type),
+ reflectTypeToName: make(map[reflect.Type]string),
+ oidToFormatCode: make(map[uint32]int16),
+
+ memoizedScanPlans: make(map[uint32]map[reflect.Type][2]ScanPlan),
+ memoizedEncodePlans: make(map[uint32]map[reflect.Type][2]EncodePlan),
+
+ TryWrapEncodePlanFuncs: []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapBuiltinTypeEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ TryWrapStructEncodePlan,
+ TryWrapSliceEncodePlan,
+ TryWrapMultiDimSliceEncodePlan,
+ TryWrapArrayEncodePlan,
+ },
+
+ TryWrapScanPlanFuncs: []TryWrapScanPlanFunc{
+ TryPointerPointerScanPlan,
+ TryWrapBuiltinTypeScanPlan,
+ TryFindUnderlyingTypeScanPlan,
+ TryWrapStructScanPlan,
+ TryWrapPtrSliceScanPlan,
+ TryWrapPtrMultiDimSliceScanPlan,
+ TryWrapPtrArrayScanPlan,
+ },
+ }
+}
+
+// RegisterType registers a data type with the Map. t must not be mutated after it is registered.
+func (m *Map) RegisterType(t *Type) {
+ m.oidToType[t.OID] = t
+ m.nameToType[t.Name] = t
+ m.oidToFormatCode[t.OID] = t.Codec.PreferredFormat()
+
+ // Invalidated by type registration
+ m.reflectTypeToType = nil
+ for k := range m.memoizedScanPlans {
+ delete(m.memoizedScanPlans, k)
+ }
+ for k := range m.memoizedEncodePlans {
+ delete(m.memoizedEncodePlans, k)
+ }
+}
+
+// RegisterDefaultPgType registers a mapping of a Go type to a PostgreSQL type name. Typically the data type to be
+// encoded or decoded is determined by the PostgreSQL OID. But if the OID of a value to be encoded or decoded is
+// unknown, this additional mapping will be used by TypeForValue to determine a suitable data type.
+func (m *Map) RegisterDefaultPgType(value any, name string) {
+ m.reflectTypeToName[reflect.TypeOf(value)] = name
+
+ // Invalidated by type registration
+ m.reflectTypeToType = nil
+ for k := range m.memoizedScanPlans {
+ delete(m.memoizedScanPlans, k)
+ }
+ for k := range m.memoizedEncodePlans {
+ delete(m.memoizedEncodePlans, k)
+ }
+}
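
A registration sketch: "mytype", its OID, and the MyType Go type are all invented for the example; TextCodec is the real pgtype codec for text-like types.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

type MyType string // hypothetical Go type for the example

func main() {
	m := pgtype.NewMap()

	// 99999 is an assumed OID; real OIDs come from pg_type on the server.
	m.RegisterType(&pgtype.Type{Name: "mytype", OID: 99999, Codec: pgtype.TextCodec{}})
	m.RegisterDefaultPgType(MyType(""), "mytype")

	t, ok := m.TypeForValue(MyType("hello"))
	fmt.Println(ok, t.Name, t.OID) // true mytype 99999
}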
+
+// TypeForOID returns the Type registered for the given OID. The returned Type must not be mutated.
+func (m *Map) TypeForOID(oid uint32) (*Type, bool) {
+ if dt, ok := m.oidToType[oid]; ok {
+ return dt, true
+ }
+
+ dt, ok := defaultMap.oidToType[oid]
+ return dt, ok
+}
+
+// TypeForName returns the Type registered for the given name. The returned Type must not be mutated.
+func (m *Map) TypeForName(name string) (*Type, bool) {
+ if dt, ok := m.nameToType[name]; ok {
+ return dt, true
+ }
+ dt, ok := defaultMap.nameToType[name]
+ return dt, ok
+}
+
+func (m *Map) buildReflectTypeToType() {
+ m.reflectTypeToType = make(map[reflect.Type]*Type)
+
+ for reflectType, name := range m.reflectTypeToName {
+ if dt, ok := m.TypeForName(name); ok {
+ m.reflectTypeToType[reflectType] = dt
+ }
+ }
+}
+
+// TypeForValue finds a data type suitable for v. Use RegisterType to register types that can encode and decode
+// themselves. Use RegisterDefaultPgType to register types that can be handled by a registered data type. The returned Type
+// must not be mutated.
+func (m *Map) TypeForValue(v any) (*Type, bool) {
+ if m.reflectTypeToType == nil {
+ m.buildReflectTypeToType()
+ }
+
+ if dt, ok := m.reflectTypeToType[reflect.TypeOf(v)]; ok {
+ return dt, true
+ }
+
+ dt, ok := defaultMap.reflectTypeToType[reflect.TypeOf(v)]
+ return dt, ok
+}
+
+// FormatCodeForOID returns the preferred format code for type oid. If the type is not registered it returns the text
+// format code.
+func (m *Map) FormatCodeForOID(oid uint32) int16 {
+ if fc, ok := m.oidToFormatCode[oid]; ok {
+ return fc
+ }
+
+ if fc, ok := defaultMap.oidToFormatCode[oid]; ok {
+ return fc
+ }
+
+ return TextFormatCode
+}
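
A small sketch of the lookup methods: they fall back to the shared default map, so the standard types are visible on a fresh Map without extra setup.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	m := pgtype.NewMap()

	t, ok := m.TypeForOID(pgtype.NumericOID)
	fmt.Println(ok, t.Name) // true numeric

	fmt.Println(m.FormatCodeForOID(pgtype.NumericOID)) // 1 (binary preferred)
	fmt.Println(m.FormatCodeForOID(99999))             // 0 (unregistered -> text)
}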
+
+// EncodePlan is a precompiled plan to encode a particular type into a particular OID and format.
+type EncodePlan interface {
+ // Encode appends the encoded bytes of value to buf. If value is the SQL value NULL then append nothing and return
+ // (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data
+ // written.
+ Encode(value any, buf []byte) (newBuf []byte, err error)
+}
+
+// ScanPlan is a precompiled plan to scan into a type of destination.
+type ScanPlan interface {
+ // Scan scans src into target. src is only valid during the call to Scan. The ScanPlan must not retain a reference to
+ // src.
+ Scan(src []byte, target any) error
+}
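
Plans can be applied to raw wire bytes directly, outside of any query. A sketch scanning a binary-format int4:

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	m := pgtype.NewMap()

	src := []byte{0, 0, 0, 42} // int4 in binary wire format

	var n int32
	plan := m.PlanScan(pgtype.Int4OID, pgtype.BinaryFormatCode, &n)
	if err := plan.Scan(src, &n); err != nil {
		panic(err)
	}
	fmt.Println(n) // 42
}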
+
+type scanPlanCodecSQLScanner struct {
+ c Codec
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *scanPlanCodecSQLScanner) Scan(src []byte, dst any) error {
+ value, err := plan.c.DecodeDatabaseSQLValue(plan.m, plan.oid, plan.formatCode, src)
+ if err != nil {
+ return err
+ }
+
+ scanner := dst.(sql.Scanner)
+ return scanner.Scan(value)
+}
+
+type scanPlanSQLScanner struct {
+ formatCode int16
+}
+
+func (plan *scanPlanSQLScanner) Scan(src []byte, dst any) error {
+ scanner := dst.(sql.Scanner)
+ if src == nil {
+		// This is necessary because an interface holding a nil []byte is not itself nil: on the binary format path it
+		// would be passed through as non-nil, and on the text format path src would be converted to an empty string.
+ return scanner.Scan(nil)
+ } else if plan.formatCode == BinaryFormatCode {
+ return scanner.Scan(src)
+ } else {
+ return scanner.Scan(string(src))
+ }
+}
+
+type scanPlanString struct{}
+
+func (scanPlanString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = string(src)
+ return nil
+}
+
+type scanPlanAnyTextToBytes struct{}
+
+func (scanPlanAnyTextToBytes) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanFail struct {
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *scanPlanFail) Scan(src []byte, dst any) error {
+	// If src is NULL it might be possible to scan into dst even though the types are not compatible. While this
+	// may seem to be a contrived case, it can occur when selecting NULL directly. PostgreSQL assigns it the type of text.
+ // It would be surprising to the caller to have to cast the NULL (e.g. `select null::int`). So try to figure out a
+ // compatible data type for dst and scan with that.
+ //
+ // See https://github.com/jackc/pgx/issues/1326
+ if src == nil {
+ // As a horrible hack try all types to find anything that can scan into dst.
+ for oid := range plan.m.oidToType {
+ // using planScan instead of Scan or PlanScan to avoid polluting the planned scan cache.
+ plan := plan.m.planScan(oid, plan.formatCode, dst)
+ if _, ok := plan.(*scanPlanFail); !ok {
+ return plan.Scan(src, dst)
+ }
+ }
+ for oid := range defaultMap.oidToType {
+ if _, ok := plan.m.oidToType[oid]; !ok {
+ plan := plan.m.planScan(oid, plan.formatCode, dst)
+ if _, ok := plan.(*scanPlanFail); !ok {
+ return plan.Scan(src, dst)
+ }
+ }
+ }
+ }
+
+ var format string
+ switch plan.formatCode {
+ case TextFormatCode:
+ format = "text"
+ case BinaryFormatCode:
+ format = "binary"
+ default:
+ format = fmt.Sprintf("unknown %d", plan.formatCode)
+ }
+
+ var dataTypeName string
+ if t, ok := plan.m.TypeForOID(plan.oid); ok {
+ dataTypeName = t.Name
+ } else {
+ dataTypeName = "unknown type"
+ }
+
+ return fmt.Errorf("cannot scan %s (OID %d) in %v format into %T", dataTypeName, plan.oid, format, dst)
+}
+
+// TryWrapScanPlanFunc is a function that tries to create a wrapper plan for target. If successful it returns a plan
+// that will convert the target passed to Scan and then call the next plan. nextTarget is target as it will be converted
+// by plan. It must be used to find another suitable ScanPlan. When it is found, SetNext must be called on plan for it
+// to be usable. ok indicates if a suitable wrapper was found.
+type TryWrapScanPlanFunc func(target any) (plan WrappedScanPlanNextSetter, nextTarget any, ok bool)
+
+type pointerPointerScanPlan struct {
+ dstType reflect.Type
+ next ScanPlan
+}
+
+func (plan *pointerPointerScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *pointerPointerScanPlan) Scan(src []byte, dst any) error {
+ el := reflect.ValueOf(dst).Elem()
+ if src == nil {
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+
+ el.Set(reflect.New(el.Type().Elem()))
+ return plan.next.Scan(src, el.Interface())
+}
+
+// TryPointerPointerScanPlan handles a pointer to a pointer by setting the target to nil for SQL NULL and allocating and
+// scanning for non-NULL.
+func TryPointerPointerScanPlan(target any) (plan WrappedScanPlanNextSetter, nextTarget any, ok bool) {
+ if dstValue := reflect.ValueOf(target); dstValue.Kind() == reflect.Ptr {
+ elemValue := dstValue.Elem()
+ if elemValue.Kind() == reflect.Ptr {
+ plan = &pointerPointerScanPlan{dstType: dstValue.Type()}
+ return plan, reflect.Zero(elemValue.Type()).Interface(), true
+ }
+ }
+
+ return nil, nil, false
+}
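
A sketch of the pointer-to-pointer behavior: NULL sets the outer pointer to nil, while a non-NULL value allocates and scans into the element.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	m := pgtype.NewMap()

	var p *int32
	plan := m.PlanScan(pgtype.Int4OID, pgtype.TextFormatCode, &p)

	_ = plan.Scan(nil, &p) // SQL NULL
	fmt.Println(p)         // <nil>

	if err := plan.Scan([]byte("7"), &p); err != nil {
		panic(err)
	}
	fmt.Println(*p) // 7
}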
+
+// SkipUnderlyingTypePlanner prevents PlanScan and PlanDecode from trying to use the underlying type.
+type SkipUnderlyingTypePlanner interface {
+ SkipUnderlyingTypePlan()
+}
+
+var elemKindToPointerTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
+ reflect.Int: reflect.TypeOf(new(int)),
+ reflect.Int8: reflect.TypeOf(new(int8)),
+ reflect.Int16: reflect.TypeOf(new(int16)),
+ reflect.Int32: reflect.TypeOf(new(int32)),
+ reflect.Int64: reflect.TypeOf(new(int64)),
+ reflect.Uint: reflect.TypeOf(new(uint)),
+ reflect.Uint8: reflect.TypeOf(new(uint8)),
+ reflect.Uint16: reflect.TypeOf(new(uint16)),
+ reflect.Uint32: reflect.TypeOf(new(uint32)),
+ reflect.Uint64: reflect.TypeOf(new(uint64)),
+ reflect.Float32: reflect.TypeOf(new(float32)),
+ reflect.Float64: reflect.TypeOf(new(float64)),
+ reflect.String: reflect.TypeOf(new(string)),
+ reflect.Bool: reflect.TypeOf(new(bool)),
+}
+
+type underlyingTypeScanPlan struct {
+ dstType reflect.Type
+ nextDstType reflect.Type
+ next ScanPlan
+}
+
+func (plan *underlyingTypeScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *underlyingTypeScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, reflect.ValueOf(dst).Convert(plan.nextDstType).Interface())
+}
+
+// TryFindUnderlyingTypeScanPlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
+// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
+func TryFindUnderlyingTypeScanPlan(dst any) (plan WrappedScanPlanNextSetter, nextDst any, ok bool) {
+ if _, ok := dst.(SkipUnderlyingTypePlanner); ok {
+ return nil, nil, false
+ }
+
+ dstValue := reflect.ValueOf(dst)
+
+ if dstValue.Kind() == reflect.Ptr {
+ var elemValue reflect.Value
+ if dstValue.IsNil() {
+ elemValue = reflect.New(dstValue.Type().Elem()).Elem()
+ } else {
+ elemValue = dstValue.Elem()
+ }
+ nextDstType := elemKindToPointerTypes[elemValue.Kind()]
+ if nextDstType == nil && elemValue.Kind() == reflect.Slice {
+ if elemValue.Type().Elem().Kind() == reflect.Uint8 {
+ var v *[]byte
+ nextDstType = reflect.TypeOf(v)
+ }
+ }
+
+ if nextDstType != nil && dstValue.Type() != nextDstType && dstValue.CanConvert(nextDstType) {
+ return &underlyingTypeScanPlan{dstType: dstValue.Type(), nextDstType: nextDstType}, dstValue.Convert(nextDstType).Interface(), true
+ }
+
+ }
+
+ return nil, nil, false
+}
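
And a sketch of underlying-type planning: a named type whose underlying type is string is converted to *string and scanned through the builtin path. Status is an invented type for the example.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

type Status string // named type with underlying type string

func main() {
	m := pgtype.NewMap()

	var s Status
	plan := m.PlanScan(pgtype.TextOID, pgtype.TextFormatCode, &s)
	if err := plan.Scan([]byte("active"), &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // active
}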
+
+type WrappedScanPlanNextSetter interface {
+ SetNext(ScanPlan)
+ ScanPlan
+}
+
+// TryWrapBuiltinTypeScanPlan tries to wrap a builtin type with a wrapper that provides additional methods. e.g. If
+// value was of type int32 then a wrapper plan would be returned that converts target to a value that implements
+// Int64Scanner.
+func TryWrapBuiltinTypeScanPlan(target any) (plan WrappedScanPlanNextSetter, nextDst any, ok bool) {
+ switch target := target.(type) {
+ case *int8:
+ return &wrapInt8ScanPlan{}, (*int8Wrapper)(target), true
+ case *int16:
+ return &wrapInt16ScanPlan{}, (*int16Wrapper)(target), true
+ case *int32:
+ return &wrapInt32ScanPlan{}, (*int32Wrapper)(target), true
+ case *int64:
+ return &wrapInt64ScanPlan{}, (*int64Wrapper)(target), true
+ case *int:
+ return &wrapIntScanPlan{}, (*intWrapper)(target), true
+ case *uint8:
+ return &wrapUint8ScanPlan{}, (*uint8Wrapper)(target), true
+ case *uint16:
+ return &wrapUint16ScanPlan{}, (*uint16Wrapper)(target), true
+ case *uint32:
+ return &wrapUint32ScanPlan{}, (*uint32Wrapper)(target), true
+ case *uint64:
+ return &wrapUint64ScanPlan{}, (*uint64Wrapper)(target), true
+ case *uint:
+ return &wrapUintScanPlan{}, (*uintWrapper)(target), true
+ case *float32:
+ return &wrapFloat32ScanPlan{}, (*float32Wrapper)(target), true
+ case *float64:
+ return &wrapFloat64ScanPlan{}, (*float64Wrapper)(target), true
+ case *string:
+ return &wrapStringScanPlan{}, (*stringWrapper)(target), true
+ case *time.Time:
+ return &wrapTimeScanPlan{}, (*timeWrapper)(target), true
+ case *time.Duration:
+ return &wrapDurationScanPlan{}, (*durationWrapper)(target), true
+ case *net.IPNet:
+ return &wrapNetIPNetScanPlan{}, (*netIPNetWrapper)(target), true
+ case *net.IP:
+ return &wrapNetIPScanPlan{}, (*netIPWrapper)(target), true
+ case *netip.Prefix:
+ return &wrapNetipPrefixScanPlan{}, (*netipPrefixWrapper)(target), true
+ case *netip.Addr:
+ return &wrapNetipAddrScanPlan{}, (*netipAddrWrapper)(target), true
+ case *map[string]*string:
+ return &wrapMapStringToPointerStringScanPlan{}, (*mapStringToPointerStringWrapper)(target), true
+ case *map[string]string:
+ return &wrapMapStringToStringScanPlan{}, (*mapStringToStringWrapper)(target), true
+ case *[16]byte:
+ return &wrapByte16ScanPlan{}, (*byte16Wrapper)(target), true
+ case *[]byte:
+ return &wrapByteSliceScanPlan{}, (*byteSliceWrapper)(target), true
+ }
+
+ return nil, nil, false
+}
+
+type wrapInt8ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt8ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt8ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int8Wrapper)(dst.(*int8)))
+}
+
+type wrapInt16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int16Wrapper)(dst.(*int16)))
+}
+
+type wrapInt32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int32Wrapper)(dst.(*int32)))
+}
+
+type wrapInt64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapInt64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapInt64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*int64Wrapper)(dst.(*int64)))
+}
+
+type wrapIntScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapIntScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapIntScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*intWrapper)(dst.(*int)))
+}
+
+type wrapUint8ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint8ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint8ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint8Wrapper)(dst.(*uint8)))
+}
+
+type wrapUint16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint16Wrapper)(dst.(*uint16)))
+}
+
+type wrapUint32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint32Wrapper)(dst.(*uint32)))
+}
+
+type wrapUint64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUint64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUint64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uint64Wrapper)(dst.(*uint64)))
+}
+
+type wrapUintScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapUintScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapUintScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*uintWrapper)(dst.(*uint)))
+}
+
+type wrapFloat32ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapFloat32ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapFloat32ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*float32Wrapper)(dst.(*float32)))
+}
+
+type wrapFloat64ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapFloat64ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapFloat64ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*float64Wrapper)(dst.(*float64)))
+}
+
+type wrapStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*stringWrapper)(dst.(*string)))
+}
+
+type wrapTimeScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapTimeScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapTimeScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*timeWrapper)(dst.(*time.Time)))
+}
+
+type wrapDurationScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapDurationScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapDurationScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*durationWrapper)(dst.(*time.Duration)))
+}
+
+type wrapNetIPNetScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetIPNetScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetIPNetScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netIPNetWrapper)(dst.(*net.IPNet)))
+}
+
+type wrapNetIPScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetIPScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetIPScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netIPWrapper)(dst.(*net.IP)))
+}
+
+type wrapNetipPrefixScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetipPrefixScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetipPrefixScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netipPrefixWrapper)(dst.(*netip.Prefix)))
+}
+
+type wrapNetipAddrScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapNetipAddrScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapNetipAddrScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*netipAddrWrapper)(dst.(*netip.Addr)))
+}
+
+type wrapMapStringToPointerStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapMapStringToPointerStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapMapStringToPointerStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*mapStringToPointerStringWrapper)(dst.(*map[string]*string)))
+}
+
+type wrapMapStringToStringScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapMapStringToStringScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapMapStringToStringScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*mapStringToStringWrapper)(dst.(*map[string]string)))
+}
+
+type wrapByte16ScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapByte16ScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapByte16ScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*byte16Wrapper)(dst.(*[16]byte)))
+}
+
+type wrapByteSliceScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapByteSliceScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapByteSliceScanPlan) Scan(src []byte, dst any) error {
+ return plan.next.Scan(src, (*byteSliceWrapper)(dst.(*[]byte)))
+}
+
+type pointerEmptyInterfaceScanPlan struct {
+ codec Codec
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *pointerEmptyInterfaceScanPlan) Scan(src []byte, dst any) error {
+ value, err := plan.codec.DecodeValue(plan.m, plan.oid, plan.formatCode, src)
+ if err != nil {
+ return err
+ }
+
+ ptrAny := dst.(*any)
+ *ptrAny = value
+
+ return nil
+}
+
+// TryWrapStructScanPlan tries to wrap a struct with a wrapper that implements CompositeIndexGetter.
+func TryWrapStructScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ var targetElemValue reflect.Value
+ if targetValue.IsNil() {
+ targetElemValue = reflect.Zero(targetValue.Type().Elem())
+ } else {
+ targetElemValue = targetValue.Elem()
+ }
+ targetElemType := targetElemValue.Type()
+
+ if targetElemType.Kind() == reflect.Struct {
+ exportedFields := getExportedFieldValues(targetElemValue)
+ if len(exportedFields) == 0 {
+ return nil, nil, false
+ }
+
+ w := ptrStructWrapper{
+ s: target,
+ exportedFields: exportedFields,
+ }
+ return &wrapAnyPtrStructScanPlan{}, &w, true
+ }
+
+ return nil, nil, false
+}
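+
+// Illustrative sketch: wrapping a pointer to a plain struct exposes its
+// exported fields, in declaration order, to a composite scan plan.
+//
+//	var v struct {
+//		A int32
+//		B string
+//	}
+//	if plan, next, ok := TryWrapStructScanPlan(&v); ok {
+//		// next wraps &v and exposes fields A and B for the next plan.
+//		_, _ = plan, next
+//	}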
+
+type wrapAnyPtrStructScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapAnyPtrStructScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapAnyPtrStructScanPlan) Scan(src []byte, target any) error {
+ w := ptrStructWrapper{
+ s: target,
+ exportedFields: getExportedFieldValues(reflect.ValueOf(target).Elem()),
+ }
+
+ return plan.next.Scan(src, &w)
+}
+
+// TryWrapPtrSliceScanPlan tries to wrap a pointer to a single dimension slice.
+func TryWrapPtrSliceScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ // Avoid using reflect path for common types.
+ switch target := target.(type) {
+ case *[]int16:
+ return &wrapPtrSliceScanPlan[int16]{}, (*FlatArray[int16])(target), true
+ case *[]int32:
+ return &wrapPtrSliceScanPlan[int32]{}, (*FlatArray[int32])(target), true
+ case *[]int64:
+ return &wrapPtrSliceScanPlan[int64]{}, (*FlatArray[int64])(target), true
+ case *[]float32:
+ return &wrapPtrSliceScanPlan[float32]{}, (*FlatArray[float32])(target), true
+ case *[]float64:
+ return &wrapPtrSliceScanPlan[float64]{}, (*FlatArray[float64])(target), true
+ case *[]string:
+ return &wrapPtrSliceScanPlan[string]{}, (*FlatArray[string])(target), true
+ case *[]time.Time:
+ return &wrapPtrSliceScanPlan[time.Time]{}, (*FlatArray[time.Time])(target), true
+ }
+
+ targetType := reflect.TypeOf(target)
+ if targetType.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemType := targetType.Elem()
+
+ if targetElemType.Kind() == reflect.Slice {
+ slice := reflect.New(targetElemType).Elem()
+ return &wrapPtrSliceReflectScanPlan{}, &anySliceArrayReflect{slice: slice}, true
+ }
+ return nil, nil, false
+}
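+
+// Illustrative sketch (assumed Map m and text-format array input): common
+// slice targets take the generic fast path above rather than reflection.
+//
+//	var xs []int32
+//	err := m.Scan(Int4ArrayOID, TextFormatCode, []byte("{1,2,3}"), &xs)
+//	// xs == []int32{1, 2, 3} on success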
+
+type wrapPtrSliceScanPlan[T any] struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrSliceScanPlan[T]) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrSliceScanPlan[T]) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, (*FlatArray[T])(target.(*[]T)))
+}
+
+type wrapPtrSliceReflectScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrSliceReflectScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrSliceReflectScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anySliceArrayReflect{slice: reflect.ValueOf(target).Elem()})
+}
+
+// TryWrapPtrMultiDimSliceScanPlan tries to wrap a pointer to a multi-dimension slice.
+func TryWrapPtrMultiDimSliceScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemValue := targetValue.Elem()
+
+ if targetElemValue.Kind() == reflect.Slice {
+ elemElemKind := targetElemValue.Type().Elem().Kind()
+ if elemElemKind == reflect.Slice {
+ if !isRagged(targetElemValue) {
+ return &wrapPtrMultiDimSliceScanPlan{}, &anyMultiDimSliceArray{slice: targetValue.Elem()}, true
+ }
+ }
+ }
+
+ return nil, nil, false
+}
+
+type wrapPtrMultiDimSliceScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrMultiDimSliceScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrMultiDimSliceScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anyMultiDimSliceArray{slice: reflect.ValueOf(target).Elem()})
+}
+
+// TryWrapPtrArrayScanPlan tries to wrap a pointer to a single dimension array.
+func TryWrapPtrArrayScanPlan(target any) (plan WrappedScanPlanNextSetter, nextValue any, ok bool) {
+ targetValue := reflect.ValueOf(target)
+ if targetValue.Kind() != reflect.Ptr {
+ return nil, nil, false
+ }
+
+ targetElemValue := targetValue.Elem()
+
+ if targetElemValue.Kind() == reflect.Array {
+ return &wrapPtrArrayReflectScanPlan{}, &anyArrayArrayReflect{array: targetElemValue}, true
+ }
+ return nil, nil, false
+}
+
+type wrapPtrArrayReflectScanPlan struct {
+ next ScanPlan
+}
+
+func (plan *wrapPtrArrayReflectScanPlan) SetNext(next ScanPlan) { plan.next = next }
+
+func (plan *wrapPtrArrayReflectScanPlan) Scan(src []byte, target any) error {
+ return plan.next.Scan(src, &anyArrayArrayReflect{array: reflect.ValueOf(target).Elem()})
+}
+
+// PlanScan prepares a plan to scan a value into target.
+func (m *Map) PlanScan(oid uint32, formatCode int16, target any) ScanPlan {
+ oidMemo := m.memoizedScanPlans[oid]
+ if oidMemo == nil {
+ oidMemo = make(map[reflect.Type][2]ScanPlan)
+ m.memoizedScanPlans[oid] = oidMemo
+ }
+ targetReflectType := reflect.TypeOf(target)
+ typeMemo := oidMemo[targetReflectType]
+ plan := typeMemo[formatCode]
+ if plan == nil {
+ plan = m.planScan(oid, formatCode, target)
+ typeMemo[formatCode] = plan
+ oidMemo[targetReflectType] = typeMemo
+ }
+
+ return plan
+}
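+
+// Illustrative sketch: plans are memoized per (OID, format, target type), so
+// a plan can be prepared once and reused on a hot path. The OID and wire
+// bytes below are assumed values.
+//
+//	m := NewMap()
+//	var n int32
+//	plan := m.PlanScan(Int4OID, BinaryFormatCode, &n)
+//	err := plan.Scan([]byte{0, 0, 0, 42}, &n) // n == 42 on success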
+
+func (m *Map) planScan(oid uint32, formatCode int16, target any) ScanPlan {
+ if target == nil {
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+ }
+
+ if _, ok := target.(*UndecodedBytes); ok {
+ return scanPlanAnyToUndecodedBytes{}
+ }
+
+ switch formatCode {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ switch oid {
+ case TextOID, VarcharOID:
+ return scanPlanString{}
+ }
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanString{}
+ case *[]byte:
+ if oid != ByteaOID {
+ return scanPlanAnyTextToBytes{}
+ }
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ var dt *Type
+
+ if dataType, ok := m.TypeForOID(oid); ok {
+ dt = dataType
+ } else if dataType, ok := m.TypeForValue(target); ok {
+ dt = dataType
+ oid = dt.OID // Preserve assumed OID in case we are recursively called below.
+ }
+
+ if dt != nil {
+ if plan := dt.Codec.PlanScan(m, oid, formatCode, target); plan != nil {
+ return plan
+ }
+ }
+
+ // This needs to happen before trying m.TryWrapScanPlanFuncs. Otherwise, a sql.Scanner would not get called if it was
+ // defined on a type that could be unwrapped such as `type myString string`.
+ //
+ // https://github.com/jackc/pgtype/issues/197
+ if _, ok := target.(sql.Scanner); ok {
+ if dt == nil {
+ return &scanPlanSQLScanner{formatCode: formatCode}
+ } else {
+ return &scanPlanCodecSQLScanner{c: dt.Codec, m: m, oid: oid, formatCode: formatCode}
+ }
+ }
+
+ for _, f := range m.TryWrapScanPlanFuncs {
+ if wrapperPlan, nextDst, ok := f(target); ok {
+ if nextPlan := m.planScan(oid, formatCode, nextDst); nextPlan != nil {
+ if _, failed := nextPlan.(*scanPlanFail); !failed {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+ }
+
+ if dt != nil {
+ if _, ok := target.(*any); ok {
+ return &pointerEmptyInterfaceScanPlan{codec: dt.Codec, m: m, oid: oid, formatCode: formatCode}
+ }
+ }
+
+ return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
+}
+
+func (m *Map) Scan(oid uint32, formatCode int16, src []byte, dst any) error {
+ if dst == nil {
+ return nil
+ }
+
+ plan := m.PlanScan(oid, formatCode, dst)
+ return plan.Scan(src, dst)
+}
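+
+// Illustrative one-shot form of the same operation (assumed OID and input):
+//
+//	var s string
+//	err := m.Scan(TextOID, TextFormatCode, []byte("hello"), &s)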
+
+var ErrScanTargetTypeChanged = errors.New("scan target type changed")
+
+func codecScan(codec Codec, m *Map, oid uint32, format int16, src []byte, dst any) error {
+ scanPlan := codec.PlanScan(m, oid, format, dst)
+ if scanPlan == nil {
+ return fmt.Errorf("PlanScan did not find a plan")
+ }
+ return scanPlan.Scan(src, dst)
+}
+
+func codecDecodeToTextFormat(codec Codec, m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ if format == TextFormatCode {
+ return string(src), nil
+ } else {
+ value, err := codec.DecodeValue(m, oid, format, src)
+ if err != nil {
+ return nil, err
+ }
+ buf, err := m.Encode(oid, TextFormatCode, value, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), nil
+ }
+}
+
+// PlanEncode returns an Encode plan for encoding value into PostgreSQL format for oid and format. If no plan can be
+// found then nil is returned.
+func (m *Map) PlanEncode(oid uint32, format int16, value any) EncodePlan {
+ oidMemo := m.memoizedEncodePlans[oid]
+ if oidMemo == nil {
+ oidMemo = make(map[reflect.Type][2]EncodePlan)
+ m.memoizedEncodePlans[oid] = oidMemo
+ }
+ targetReflectType := reflect.TypeOf(value)
+ typeMemo := oidMemo[targetReflectType]
+ plan := typeMemo[format]
+ if plan == nil {
+ plan = m.planEncode(oid, format, value)
+ typeMemo[format] = plan
+ oidMemo[targetReflectType] = typeMemo
+ }
+
+ return plan
+}
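+
+// Illustrative sketch: as with scanning, encode plans are memoized by value
+// type, so one plan can encode many values (assumed OID).
+//
+//	plan := m.PlanEncode(Int8OID, BinaryFormatCode, int64(0))
+//	if plan != nil {
+//		buf, err := plan.Encode(int64(7), nil)
+//		_, _ = buf, err
+//	}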
+
+func (m *Map) planEncode(oid uint32, format int16, value any) EncodePlan {
+ if format == TextFormatCode {
+ switch value.(type) {
+ case string:
+ return encodePlanStringToAnyTextFormat{}
+ case TextValuer:
+ return encodePlanTextValuerToAnyTextFormat{}
+ }
+ }
+
+ var dt *Type
+ if dataType, ok := m.TypeForOID(oid); ok {
+ dt = dataType
+ } else {
+ // If no type for the OID was found, then either it is unknowable (e.g. the simple protocol) or it is an
+ // unregistered type. In either case try to find the type and OID that matches the value (e.g. a []byte would be
+ // registered to PostgreSQL bytea).
+ if dataType, ok := m.TypeForValue(value); ok {
+ dt = dataType
+ oid = dt.OID // Preserve assumed OID in case we are recursively called below.
+ }
+ }
+
+ if dt != nil {
+ if plan := dt.Codec.PlanEncode(m, oid, format, value); plan != nil {
+ return plan
+ }
+ }
+
+ for _, f := range m.TryWrapEncodePlanFuncs {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := m.PlanEncode(oid, format, nextValue); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+ }
+
+ return nil
+}
+
+type encodePlanStringToAnyTextFormat struct{}
+
+func (encodePlanStringToAnyTextFormat) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(string)
+ return append(buf, s...), nil
+}
+
+type encodePlanTextValuerToAnyTextFormat struct{}
+
+func (encodePlanTextValuerToAnyTextFormat) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+ if !t.Valid {
+ return nil, nil
+ }
+
+ return append(buf, t.String...), nil
+}
+
+type encodePlanDriverValuer struct {
+ m *Map
+ oid uint32
+ formatCode int16
+}
+
+func (plan *encodePlanDriverValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ dv := value.(driver.Valuer)
+ if dv == nil {
+ return nil, nil
+ }
+ v, err := dv.Value()
+ if err != nil {
+ return nil, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+
+ newBuf, err = plan.m.Encode(plan.oid, plan.formatCode, v, buf)
+ if err == nil {
+ return newBuf, nil
+ }
+
+ s, ok := v.(string)
+ if !ok {
+ return nil, err
+ }
+
+ var scannedValue any
+ scanErr := plan.m.Scan(plan.oid, TextFormatCode, []byte(s), &scannedValue)
+ if scanErr != nil {
+ return nil, err
+ }
+
+ // Prevent infinite loop. We can't encode this. See https://github.com/jackc/pgx/issues/1331.
+ if reflect.TypeOf(value) == reflect.TypeOf(scannedValue) {
+ return nil, fmt.Errorf("tried to encode %v via encoding to text and scanning but failed due to receiving same type back", value)
+ }
+
+ var err2 error
+ newBuf, err2 = plan.m.Encode(plan.oid, BinaryFormatCode, scannedValue, buf)
+ if err2 != nil {
+ return nil, err
+ }
+
+ return newBuf, nil
+}
+
+// TryWrapEncodePlanFunc is a function that tries to create a wrapper plan for value. If successful it returns a plan
+// that will convert the value passed to Encode and then call the next plan. nextValue is value as it will be converted
+// by plan. It must be used to find another suitable EncodePlan. When one is found, SetNext must be called on plan
+// before the plan is usable. ok indicates whether a suitable wrapper was found.
+type TryWrapEncodePlanFunc func(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool)
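+
+// Illustrative sketch of how wrapper funcs chain: encoding a *MyString first
+// dereferences the pointer, then converts MyString to its underlying string,
+// and only then reaches the text codec's plan; each wrapper's SetNext points
+// at the following stage.
+//
+//	type MyString string
+//	s := MyString("x")
+//	plan := m.PlanEncode(TextOID, TextFormatCode, &s) // deref -> underlying type -> codec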
+
+type derefPointerEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *derefPointerEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *derefPointerEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ptr := reflect.ValueOf(value)
+
+ if ptr.IsNil() {
+ return nil, nil
+ }
+
+ return plan.next.Encode(ptr.Elem().Interface(), buf)
+}
+
+// TryWrapDerefPointerEncodePlan tries to dereference a pointer. e.g. If value was of type *string then a wrapper plan
+// would be returned that dereferences the value.
+func TryWrapDerefPointerEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Ptr {
+ return &derefPointerEncodePlan{}, reflect.New(valueType.Elem()).Elem().Interface(), true
+ }
+
+ return nil, nil, false
+}
+
+var kindToTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
+ reflect.Int: reflect.TypeOf(int(0)),
+ reflect.Int8: reflect.TypeOf(int8(0)),
+ reflect.Int16: reflect.TypeOf(int16(0)),
+ reflect.Int32: reflect.TypeOf(int32(0)),
+ reflect.Int64: reflect.TypeOf(int64(0)),
+ reflect.Uint: reflect.TypeOf(uint(0)),
+ reflect.Uint8: reflect.TypeOf(uint8(0)),
+ reflect.Uint16: reflect.TypeOf(uint16(0)),
+ reflect.Uint32: reflect.TypeOf(uint32(0)),
+ reflect.Uint64: reflect.TypeOf(uint64(0)),
+ reflect.Float32: reflect.TypeOf(float32(0)),
+ reflect.Float64: reflect.TypeOf(float64(0)),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Bool: reflect.TypeOf(false),
+}
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+type underlyingTypeEncodePlan struct {
+ nextValueType reflect.Type
+ next EncodePlan
+}
+
+func (plan *underlyingTypeEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *underlyingTypeEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(reflect.ValueOf(value).Convert(plan.nextValueType).Interface(), buf)
+}
+
+// TryWrapFindUnderlyingTypeEncodePlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
+// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
+func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if value == nil {
+ return nil, nil, false
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if _, ok := value.(SkipUnderlyingTypePlanner); ok {
+ return nil, nil, false
+ }
+
+ refValue := reflect.ValueOf(value)
+
+ nextValueType := kindToTypes[refValue.Kind()]
+ if nextValueType != nil && refValue.Type() != nextValueType {
+ return &underlyingTypeEncodePlan{nextValueType: nextValueType}, refValue.Convert(nextValueType).Interface(), true
+ }
+
+ // []byte is a special case. It is a slice, but we treat it as a scalar type. For a named type like
+ // json.RawMessage, which is defined as []byte, the underlying type should be considered []byte. But no other
+ // slice has a special underlying type.
+ //
+ // https://github.com/jackc/pgx/issues/1763
+ if refValue.Type() != byteSliceType && refValue.Type().AssignableTo(byteSliceType) {
+ return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
+ }
+
+ return nil, nil, false
+}
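+
+// Illustrative sketch of the []byte special case above:
+//
+//	var raw json.RawMessage = []byte(`{"a":1}`)
+//	if _, next, ok := TryWrapFindUnderlyingTypeEncodePlan(raw); ok {
+//		_ = next.([]byte) // raw converted to its underlying []byte
+//	}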
+
+type WrappedEncodePlanNextSetter interface {
+ SetNext(EncodePlan)
+ EncodePlan
+}
+
+// TryWrapBuiltinTypeEncodePlan tries to wrap a builtin type with a wrapper that provides additional methods. e.g. If
+// value was of type int32 then a wrapper plan would be returned that converts value to a type that implements
+// Int64Valuer.
+func TryWrapBuiltinTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ switch value := value.(type) {
+ case int8:
+ return &wrapInt8EncodePlan{}, int8Wrapper(value), true
+ case int16:
+ return &wrapInt16EncodePlan{}, int16Wrapper(value), true
+ case int32:
+ return &wrapInt32EncodePlan{}, int32Wrapper(value), true
+ case int64:
+ return &wrapInt64EncodePlan{}, int64Wrapper(value), true
+ case int:
+ return &wrapIntEncodePlan{}, intWrapper(value), true
+ case uint8:
+ return &wrapUint8EncodePlan{}, uint8Wrapper(value), true
+ case uint16:
+ return &wrapUint16EncodePlan{}, uint16Wrapper(value), true
+ case uint32:
+ return &wrapUint32EncodePlan{}, uint32Wrapper(value), true
+ case uint64:
+ return &wrapUint64EncodePlan{}, uint64Wrapper(value), true
+ case uint:
+ return &wrapUintEncodePlan{}, uintWrapper(value), true
+ case float32:
+ return &wrapFloat32EncodePlan{}, float32Wrapper(value), true
+ case float64:
+ return &wrapFloat64EncodePlan{}, float64Wrapper(value), true
+ case string:
+ return &wrapStringEncodePlan{}, stringWrapper(value), true
+ case time.Time:
+ return &wrapTimeEncodePlan{}, timeWrapper(value), true
+ case time.Duration:
+ return &wrapDurationEncodePlan{}, durationWrapper(value), true
+ case net.IPNet:
+ return &wrapNetIPNetEncodePlan{}, netIPNetWrapper(value), true
+ case net.IP:
+ return &wrapNetIPEncodePlan{}, netIPWrapper(value), true
+ case netip.Prefix:
+ return &wrapNetipPrefixEncodePlan{}, netipPrefixWrapper(value), true
+ case netip.Addr:
+ return &wrapNetipAddrEncodePlan{}, netipAddrWrapper(value), true
+ case map[string]*string:
+ return &wrapMapStringToPointerStringEncodePlan{}, mapStringToPointerStringWrapper(value), true
+ case map[string]string:
+ return &wrapMapStringToStringEncodePlan{}, mapStringToStringWrapper(value), true
+ case [16]byte:
+ return &wrapByte16EncodePlan{}, byte16Wrapper(value), true
+ case []byte:
+ return &wrapByteSliceEncodePlan{}, byteSliceWrapper(value), true
+ case fmt.Stringer:
+ return &wrapFmtStringerEncodePlan{}, fmtStringerWrapper{value}, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapInt8EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt8EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt8EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int8Wrapper(value.(int8)), buf)
+}
+
+type wrapInt16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int16Wrapper(value.(int16)), buf)
+}
+
+type wrapInt32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int32Wrapper(value.(int32)), buf)
+}
+
+type wrapInt64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapInt64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapInt64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(int64Wrapper(value.(int64)), buf)
+}
+
+type wrapIntEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapIntEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapIntEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(intWrapper(value.(int)), buf)
+}
+
+type wrapUint8EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint8EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint8EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint8Wrapper(value.(uint8)), buf)
+}
+
+type wrapUint16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint16Wrapper(value.(uint16)), buf)
+}
+
+type wrapUint32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint32Wrapper(value.(uint32)), buf)
+}
+
+type wrapUint64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUint64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUint64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uint64Wrapper(value.(uint64)), buf)
+}
+
+type wrapUintEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapUintEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapUintEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(uintWrapper(value.(uint)), buf)
+}
+
+type wrapFloat32EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFloat32EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFloat32EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(float32Wrapper(value.(float32)), buf)
+}
+
+type wrapFloat64EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFloat64EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFloat64EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(float64Wrapper(value.(float64)), buf)
+}
+
+type wrapStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(stringWrapper(value.(string)), buf)
+}
+
+type wrapTimeEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapTimeEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapTimeEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(timeWrapper(value.(time.Time)), buf)
+}
+
+type wrapDurationEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapDurationEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapDurationEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(durationWrapper(value.(time.Duration)), buf)
+}
+
+type wrapNetIPNetEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetIPNetEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetIPNetEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netIPNetWrapper(value.(net.IPNet)), buf)
+}
+
+type wrapNetIPEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetIPEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetIPEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netIPWrapper(value.(net.IP)), buf)
+}
+
+type wrapNetipPrefixEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetipPrefixEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetipPrefixEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netipPrefixWrapper(value.(netip.Prefix)), buf)
+}
+
+type wrapNetipAddrEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapNetipAddrEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapNetipAddrEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(netipAddrWrapper(value.(netip.Addr)), buf)
+}
+
+type wrapMapStringToPointerStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMapStringToPointerStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMapStringToPointerStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(mapStringToPointerStringWrapper(value.(map[string]*string)), buf)
+}
+
+type wrapMapStringToStringEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMapStringToStringEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMapStringToStringEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(mapStringToStringWrapper(value.(map[string]string)), buf)
+}
+
+type wrapByte16EncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapByte16EncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapByte16EncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(byte16Wrapper(value.([16]byte)), buf)
+}
+
+type wrapByteSliceEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapByteSliceEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapByteSliceEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(byteSliceWrapper(value.([]byte)), buf)
+}
+
+type wrapFmtStringerEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapFmtStringerEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapFmtStringerEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode(fmtStringerWrapper{value.(fmt.Stringer)}, buf)
+}
+
+// TryWrapStructEncodePlan tries to wrap a struct with a wrapper that implements CompositeIndexGetter.
+func TryWrapStructEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Struct {
+ exportedFields := getExportedFieldValues(reflect.ValueOf(value))
+ if len(exportedFields) == 0 {
+ return nil, nil, false
+ }
+
+ w := structWrapper{
+ s: value,
+ exportedFields: exportedFields,
+ }
+ return &wrapAnyStructEncodePlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapAnyStructEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapAnyStructEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapAnyStructEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := structWrapper{
+ s: value,
+ exportedFields: getExportedFieldValues(reflect.ValueOf(value)),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func getExportedFieldValues(structValue reflect.Value) []reflect.Value {
+ structType := structValue.Type()
+ exportedFields := make([]reflect.Value, 0, structValue.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ sf := structType.Field(i)
+ if sf.IsExported() {
+ exportedFields = append(exportedFields, structValue.Field(i))
+ }
+ }
+
+ return exportedFields
+}
+
+func TryWrapSliceEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ // Avoid using reflect path for common types.
+ switch value := value.(type) {
+ case []int16:
+ return &wrapSliceEncodePlan[int16]{}, (FlatArray[int16])(value), true
+ case []int32:
+ return &wrapSliceEncodePlan[int32]{}, (FlatArray[int32])(value), true
+ case []int64:
+ return &wrapSliceEncodePlan[int64]{}, (FlatArray[int64])(value), true
+ case []float32:
+ return &wrapSliceEncodePlan[float32]{}, (FlatArray[float32])(value), true
+ case []float64:
+ return &wrapSliceEncodePlan[float64]{}, (FlatArray[float64])(value), true
+ case []string:
+ return &wrapSliceEncodePlan[string]{}, (FlatArray[string])(value), true
+ case []time.Time:
+ return &wrapSliceEncodePlan[time.Time]{}, (FlatArray[time.Time])(value), true
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Slice {
+ w := anySliceArrayReflect{
+ slice: reflect.ValueOf(value),
+ }
+ return &wrapSliceEncodeReflectPlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapSliceEncodePlan[T any] struct {
+ next EncodePlan
+}
+
+func (plan *wrapSliceEncodePlan[T]) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapSliceEncodePlan[T]) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ return plan.next.Encode((FlatArray[T])(value.([]T)), buf)
+}
+
+type wrapSliceEncodeReflectPlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapSliceEncodeReflectPlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapSliceEncodeReflectPlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anySliceArrayReflect{
+ slice: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func TryWrapMultiDimSliceEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ sliceValue := reflect.ValueOf(value)
+ if sliceValue.Kind() == reflect.Slice {
+ valueElemType := sliceValue.Type().Elem()
+
+ if valueElemType.Kind() == reflect.Slice {
+ if !isRagged(sliceValue) {
+ w := anyMultiDimSliceArray{
+ slice: reflect.ValueOf(value),
+ }
+ return &wrapMultiDimSliceEncodePlan{}, &w, true
+ }
+ }
+ }
+
+ return nil, nil, false
+}
+
+type wrapMultiDimSliceEncodePlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapMultiDimSliceEncodePlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapMultiDimSliceEncodePlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anyMultiDimSliceArray{
+ slice: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(&w, buf)
+}
+
+func TryWrapArrayEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
+ if _, ok := value.(driver.Valuer); ok {
+ return nil, nil, false
+ }
+
+ if valueType := reflect.TypeOf(value); valueType != nil && valueType.Kind() == reflect.Array {
+ w := anyArrayArrayReflect{
+ array: reflect.ValueOf(value),
+ }
+ return &wrapArrayEncodeReflectPlan{}, w, true
+ }
+
+ return nil, nil, false
+}
+
+type wrapArrayEncodeReflectPlan struct {
+ next EncodePlan
+}
+
+func (plan *wrapArrayEncodeReflectPlan) SetNext(next EncodePlan) { plan.next = next }
+
+func (plan *wrapArrayEncodeReflectPlan) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ w := anyArrayArrayReflect{
+ array: reflect.ValueOf(value),
+ }
+
+ return plan.next.Encode(w, buf)
+}
+
+func newEncodeError(value any, m *Map, oid uint32, formatCode int16, err error) error {
+ var format string
+ switch formatCode {
+ case TextFormatCode:
+ format = "text"
+ case BinaryFormatCode:
+ format = "binary"
+ default:
+ format = fmt.Sprintf("unknown (%d)", formatCode)
+ }
+
+ var dataTypeName string
+ if t, ok := m.TypeForOID(oid); ok {
+ dataTypeName = t.Name
+ } else {
+ dataTypeName = "unknown type"
+ }
+
+ return fmt.Errorf("unable to encode %#v into %s format for %s (OID %d): %w", value, format, dataTypeName, oid, err)
+}
+
+// Encode appends the encoded bytes of value to buf. If value is the SQL value NULL then append nothing and return
+// (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data
+// written.
+func (m *Map) Encode(oid uint32, formatCode int16, value any, buf []byte) (newBuf []byte, err error) {
+ if isNil, callNilDriverValuer := isNilDriverValuer(value); isNil {
+ if callNilDriverValuer {
+ newBuf, err = (&encodePlanDriverValuer{m: m, oid: oid, formatCode: formatCode}).Encode(value, buf)
+ if err != nil {
+ return nil, newEncodeError(value, m, oid, formatCode, err)
+ }
+
+ return newBuf, nil
+ } else {
+ return nil, nil
+ }
+ }
+
+ plan := m.PlanEncode(oid, formatCode, value)
+ if plan == nil {
+ return nil, newEncodeError(value, m, oid, formatCode, errors.New("cannot find encode plan"))
+ }
+
+ newBuf, err = plan.Encode(value, buf)
+ if err != nil {
+ return nil, newEncodeError(value, m, oid, formatCode, err)
+ }
+
+ return newBuf, nil
+}
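+
+// Illustrative sketch of the NULL contract above: a typed nil pointer
+// encodes as SQL NULL, returning (nil, nil).
+//
+//	var p *int32
+//	buf, err := m.Encode(Int4OID, BinaryFormatCode, p, nil)
+//	// buf == nil && err == nil => write NULL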
+
+// SQLScanner returns a database/sql.Scanner for v. This is necessary for types like Array[T] and Range[T] where the
+// type needs assistance from Map to implement the sql.Scanner interface. It is not necessary for types like Box that
+// implement sql.Scanner directly.
+//
+// This uses the type of v to look up the PostgreSQL OID that v presumably came from. This means v must be registered
+// with m by calling RegisterDefaultPgType.
+func (m *Map) SQLScanner(v any) sql.Scanner {
+ if s, ok := v.(sql.Scanner); ok {
+ return s
+ }
+
+ return &sqlScannerWrapper{m: m, v: v}
+}
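+
+// Illustrative usage with database/sql (row is an assumed *sql.Row;
+// Range[Int4] is registered by default as int4range):
+//
+//	var r Range[Int4]
+//	err := row.Scan(m.SQLScanner(&r))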
+
+type sqlScannerWrapper struct {
+ m *Map
+ v any
+}
+
+func (w *sqlScannerWrapper) Scan(src any) error {
+ t, ok := w.m.TypeForValue(w.v)
+ if !ok {
+ return fmt.Errorf("cannot convert to sql.Scanner: cannot find registered type for %T", w.v)
+ }
+
+ var bufSrc []byte
+ if src != nil {
+ switch src := src.(type) {
+ case string:
+ bufSrc = []byte(src)
+ case []byte:
+ bufSrc = src
+ default:
+ bufSrc = []byte(fmt.Sprint(src)) // fall back to the source value's default string form
+ }
+ }
+
+ return w.m.Scan(t.OID, TextFormatCode, bufSrc, w.v)
+}
+
+// canBeNil returns true if value can be nil.
+func canBeNil(value any) bool {
+ refVal := reflect.ValueOf(value)
+ kind := refVal.Kind()
+ switch kind {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+ return true
+ default:
+ return false
+ }
+}
+
+// valuerReflectType is a reflect.Type for driver.Valuer. It has confusing syntax because reflect.TypeOf returns nil
+// when its argument is a nil interface value. So we use a pointer to the interface and call Elem to get the actual
+// type. Yuck.
+//
+// This can be simplified in Go 1.22 with reflect.TypeFor.
+//
+// var valuerReflectType = reflect.TypeFor[driver.Valuer]()
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// isNilDriverValuer reports whether value is any type of nil and, if so, whether driver.Valuer should still be
+// called on it. A nil *T is not considered to implement driver.Valuer if the interface is implemented only by T.
+func isNilDriverValuer(value any) (isNil bool, callNilDriverValuer bool) {
+ if value == nil {
+ return true, false
+ }
+
+ refVal := reflect.ValueOf(value)
+ kind := refVal.Kind()
+ switch kind {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+ if !refVal.IsNil() {
+ return false, false
+ }
+
+ if _, ok := value.(driver.Valuer); ok {
+ if kind == reflect.Ptr {
+ // The type assertion above succeeds if driver.Valuer is implemented on either T or *T. Call Value on the nil
+ // pointer only when the method set of *T alone implements it, i.e. when T itself does not.
+ return true, !refVal.Type().Elem().Implements(valuerReflectType)
+ } else {
+ return true, true
+ }
+ }
+
+ return true, false
+ default:
+ return false, false
+ }
+}
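+
+// Illustrative sketch of the receiver distinction (hypothetical type): with a
+// pointer receiver, Value can safely run on a nil pointer, so it is still
+// called; with a value receiver, calling it would dereference nil, so it is not.
+//
+//	type nullThing struct{}
+//	func (*nullThing) Value() (driver.Value, error) { return nil, nil }
+//
+//	isNil, call := isNilDriverValuer((*nullThing)(nil)) // true, true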
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
new file mode 100644
index 0000000..9525f37
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
@@ -0,0 +1,226 @@
+package pgtype
+
+import (
+ "encoding/json"
+ "net"
+ "net/netip"
+ "reflect"
+ "sync"
+ "time"
+)
+
+var (
+ // defaultMap contains default mappings between PostgreSQL server types and Go type handling logic.
+ defaultMap *Map
+ defaultMapInitOnce = sync.Once{}
+)
+
+func initDefaultMap() {
+ defaultMap = &Map{
+ oidToType: make(map[uint32]*Type),
+ nameToType: make(map[string]*Type),
+ reflectTypeToName: make(map[reflect.Type]string),
+ oidToFormatCode: make(map[uint32]int16),
+
+ memoizedScanPlans: make(map[uint32]map[reflect.Type][2]ScanPlan),
+ memoizedEncodePlans: make(map[uint32]map[reflect.Type][2]EncodePlan),
+
+ TryWrapEncodePlanFuncs: []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapBuiltinTypeEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ TryWrapStructEncodePlan,
+ TryWrapSliceEncodePlan,
+ TryWrapMultiDimSliceEncodePlan,
+ TryWrapArrayEncodePlan,
+ },
+
+ TryWrapScanPlanFuncs: []TryWrapScanPlanFunc{
+ TryPointerPointerScanPlan,
+ TryWrapBuiltinTypeScanPlan,
+ TryFindUnderlyingTypeScanPlan,
+ TryWrapStructScanPlan,
+ TryWrapPtrSliceScanPlan,
+ TryWrapPtrMultiDimSliceScanPlan,
+ TryWrapPtrArrayScanPlan,
+ },
+ }
+
+ // Base types
+ defaultMap.RegisterType(&Type{Name: "aclitem", OID: ACLItemOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
+ defaultMap.RegisterType(&Type{Name: "bit", OID: BitOID, Codec: BitsCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bool", OID: BoolOID, Codec: BoolCodec{}})
+ defaultMap.RegisterType(&Type{Name: "box", OID: BoxOID, Codec: BoxCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bpchar", OID: BPCharOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "bytea", OID: ByteaOID, Codec: ByteaCodec{}})
+ defaultMap.RegisterType(&Type{Name: "char", OID: QCharOID, Codec: QCharCodec{}})
+ defaultMap.RegisterType(&Type{Name: "cid", OID: CIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "cidr", OID: CIDROID, Codec: InetCodec{}})
+ defaultMap.RegisterType(&Type{Name: "circle", OID: CircleOID, Codec: CircleCodec{}})
+ defaultMap.RegisterType(&Type{Name: "date", OID: DateOID, Codec: DateCodec{}})
+ defaultMap.RegisterType(&Type{Name: "float4", OID: Float4OID, Codec: Float4Codec{}})
+ defaultMap.RegisterType(&Type{Name: "float8", OID: Float8OID, Codec: Float8Codec{}})
+ defaultMap.RegisterType(&Type{Name: "inet", OID: InetOID, Codec: InetCodec{}})
+ defaultMap.RegisterType(&Type{Name: "int2", OID: Int2OID, Codec: Int2Codec{}})
+ defaultMap.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}})
+ defaultMap.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}})
+ defaultMap.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}})
+ defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: &JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
+ defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: &JSONBCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
+ defaultMap.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
+ defaultMap.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}})
+ defaultMap.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}})
+ defaultMap.RegisterType(&Type{Name: "macaddr8", OID: Macaddr8OID, Codec: MacaddrCodec{}})
+ defaultMap.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}})
+ defaultMap.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}})
+ defaultMap.RegisterType(&Type{Name: "oid", OID: OIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "path", OID: PathOID, Codec: PathCodec{}})
+ defaultMap.RegisterType(&Type{Name: "point", OID: PointOID, Codec: PointCodec{}})
+ defaultMap.RegisterType(&Type{Name: "polygon", OID: PolygonOID, Codec: PolygonCodec{}})
+ defaultMap.RegisterType(&Type{Name: "record", OID: RecordOID, Codec: RecordCodec{}})
+ defaultMap.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}})
+ defaultMap.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}})
+ defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: &TimestampCodec{}})
+ defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: &TimestamptzCodec{}})
+ defaultMap.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}})
+ defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
+ defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
+ defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
+
+ // Range types
+ defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
+ defaultMap.RegisterType(&Type{Name: "int4range", OID: Int4rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int4OID]}})
+ defaultMap.RegisterType(&Type{Name: "int8range", OID: Int8rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int8OID]}})
+ defaultMap.RegisterType(&Type{Name: "numrange", OID: NumrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[NumericOID]}})
+ defaultMap.RegisterType(&Type{Name: "tsrange", OID: TsrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
+ defaultMap.RegisterType(&Type{Name: "tstzrange", OID: TstzrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
+
+ // Multirange types
+ defaultMap.RegisterType(&Type{Name: "datemultirange", OID: DatemultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "int4multirange", OID: Int4multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "int8multirange", OID: Int8multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "nummultirange", OID: NummultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "tsmultirange", OID: TsmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "tstzmultirange", OID: TstzmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
+
+ // Array types
+ defaultMap.RegisterType(&Type{Name: "_aclitem", OID: ACLItemArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ACLItemOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bit", OID: BitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BitOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bool", OID: BoolArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoolOID]}})
+ defaultMap.RegisterType(&Type{Name: "_box", OID: BoxArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoxOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bpchar", OID: BPCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BPCharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_bytea", OID: ByteaArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ByteaOID]}})
+ defaultMap.RegisterType(&Type{Name: "_char", OID: QCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[QCharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_cid", OID: CIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_cidr", OID: CIDRArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDROID]}})
+ defaultMap.RegisterType(&Type{Name: "_circle", OID: CircleArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CircleOID]}})
+ defaultMap.RegisterType(&Type{Name: "_date", OID: DateArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DateOID]}})
+ defaultMap.RegisterType(&Type{Name: "_daterange", OID: DaterangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_float4", OID: Float4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float4OID]}})
+ defaultMap.RegisterType(&Type{Name: "_float8", OID: Float8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float8OID]}})
+ defaultMap.RegisterType(&Type{Name: "_inet", OID: InetArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[InetOID]}})
+ defaultMap.RegisterType(&Type{Name: "_int2", OID: Int2ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int2OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int4", OID: Int4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int4range", OID: Int4rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_int8", OID: Int8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8OID]}})
+ defaultMap.RegisterType(&Type{Name: "_int8range", OID: Int8rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_interval", OID: IntervalArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[IntervalOID]}})
+ defaultMap.RegisterType(&Type{Name: "_json", OID: JSONArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONOID]}})
+ defaultMap.RegisterType(&Type{Name: "_jsonb", OID: JSONBArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONBOID]}})
+ defaultMap.RegisterType(&Type{Name: "_jsonpath", OID: JSONPathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONPathOID]}})
+ defaultMap.RegisterType(&Type{Name: "_line", OID: LineArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LineOID]}})
+ defaultMap.RegisterType(&Type{Name: "_lseg", OID: LsegArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LsegOID]}})
+ defaultMap.RegisterType(&Type{Name: "_macaddr", OID: MacaddrArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[MacaddrOID]}})
+ defaultMap.RegisterType(&Type{Name: "_name", OID: NameArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NameOID]}})
+ defaultMap.RegisterType(&Type{Name: "_numeric", OID: NumericArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumericOID]}})
+ defaultMap.RegisterType(&Type{Name: "_numrange", OID: NumrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_oid", OID: OIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[OIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_path", OID: PathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PathOID]}})
+ defaultMap.RegisterType(&Type{Name: "_point", OID: PointArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PointOID]}})
+ defaultMap.RegisterType(&Type{Name: "_polygon", OID: PolygonArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PolygonOID]}})
+ defaultMap.RegisterType(&Type{Name: "_record", OID: RecordArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[RecordOID]}})
+ defaultMap.RegisterType(&Type{Name: "_text", OID: TextArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TextOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tid", OID: TIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_time", OID: TimeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_timestamp", OID: TimestampArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
+ defaultMap.RegisterType(&Type{Name: "_timestamptz", OID: TimestamptzArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tsrange", OID: TsrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_tstzrange", OID: TstzrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
+ defaultMap.RegisterType(&Type{Name: "_uuid", OID: UUIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[UUIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
+ defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
+ defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
+
+ // Integer types that directly map to a PostgreSQL type
+ registerDefaultPgTypeVariants[int16](defaultMap, "int2")
+ registerDefaultPgTypeVariants[int32](defaultMap, "int4")
+ registerDefaultPgTypeVariants[int64](defaultMap, "int8")
+
+ // Integer types that do not have a direct match to a PostgreSQL type
+ registerDefaultPgTypeVariants[int8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[int](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint16](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint32](defaultMap, "int8")
+ registerDefaultPgTypeVariants[uint64](defaultMap, "numeric")
+ registerDefaultPgTypeVariants[uint](defaultMap, "numeric")
+
+ registerDefaultPgTypeVariants[float32](defaultMap, "float4")
+ registerDefaultPgTypeVariants[float64](defaultMap, "float8")
+
+ registerDefaultPgTypeVariants[bool](defaultMap, "bool")
+ registerDefaultPgTypeVariants[time.Time](defaultMap, "timestamptz")
+ registerDefaultPgTypeVariants[time.Duration](defaultMap, "interval")
+ registerDefaultPgTypeVariants[string](defaultMap, "text")
+ registerDefaultPgTypeVariants[json.RawMessage](defaultMap, "json")
+ registerDefaultPgTypeVariants[[]byte](defaultMap, "bytea")
+
+ registerDefaultPgTypeVariants[net.IP](defaultMap, "inet")
+ registerDefaultPgTypeVariants[net.IPNet](defaultMap, "cidr")
+ registerDefaultPgTypeVariants[netip.Addr](defaultMap, "inet")
+ registerDefaultPgTypeVariants[netip.Prefix](defaultMap, "cidr")
+
+ // pgtype provided structs
+ registerDefaultPgTypeVariants[Bits](defaultMap, "varbit")
+ registerDefaultPgTypeVariants[Bool](defaultMap, "bool")
+ registerDefaultPgTypeVariants[Box](defaultMap, "box")
+ registerDefaultPgTypeVariants[Circle](defaultMap, "circle")
+ registerDefaultPgTypeVariants[Date](defaultMap, "date")
+ registerDefaultPgTypeVariants[Range[Date]](defaultMap, "daterange")
+ registerDefaultPgTypeVariants[Multirange[Range[Date]]](defaultMap, "datemultirange")
+ registerDefaultPgTypeVariants[Float4](defaultMap, "float4")
+ registerDefaultPgTypeVariants[Float8](defaultMap, "float8")
+ registerDefaultPgTypeVariants[Range[Float8]](defaultMap, "numrange") // There is no PostgreSQL builtin float8range so map it to numrange.
+ registerDefaultPgTypeVariants[Multirange[Range[Float8]]](defaultMap, "nummultirange") // There is no PostgreSQL builtin float8multirange so map it to nummultirange.
+ registerDefaultPgTypeVariants[Int2](defaultMap, "int2")
+ registerDefaultPgTypeVariants[Int4](defaultMap, "int4")
+ registerDefaultPgTypeVariants[Range[Int4]](defaultMap, "int4range")
+ registerDefaultPgTypeVariants[Multirange[Range[Int4]]](defaultMap, "int4multirange")
+ registerDefaultPgTypeVariants[Int8](defaultMap, "int8")
+ registerDefaultPgTypeVariants[Range[Int8]](defaultMap, "int8range")
+ registerDefaultPgTypeVariants[Multirange[Range[Int8]]](defaultMap, "int8multirange")
+ registerDefaultPgTypeVariants[Interval](defaultMap, "interval")
+ registerDefaultPgTypeVariants[Line](defaultMap, "line")
+ registerDefaultPgTypeVariants[Lseg](defaultMap, "lseg")
+ registerDefaultPgTypeVariants[Numeric](defaultMap, "numeric")
+ registerDefaultPgTypeVariants[Range[Numeric]](defaultMap, "numrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Numeric]]](defaultMap, "nummultirange")
+ registerDefaultPgTypeVariants[Path](defaultMap, "path")
+ registerDefaultPgTypeVariants[Point](defaultMap, "point")
+ registerDefaultPgTypeVariants[Polygon](defaultMap, "polygon")
+ registerDefaultPgTypeVariants[TID](defaultMap, "tid")
+ registerDefaultPgTypeVariants[Text](defaultMap, "text")
+ registerDefaultPgTypeVariants[Time](defaultMap, "time")
+ registerDefaultPgTypeVariants[Timestamp](defaultMap, "timestamp")
+ registerDefaultPgTypeVariants[Timestamptz](defaultMap, "timestamptz")
+ registerDefaultPgTypeVariants[Range[Timestamp]](defaultMap, "tsrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Timestamp]]](defaultMap, "tsmultirange")
+ registerDefaultPgTypeVariants[Range[Timestamptz]](defaultMap, "tstzrange")
+ registerDefaultPgTypeVariants[Multirange[Range[Timestamptz]]](defaultMap, "tstzmultirange")
+ registerDefaultPgTypeVariants[UUID](defaultMap, "uuid")
+
+ defaultMap.buildReflectTypeToType()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/point.go b/vendor/github.com/jackc/pgx/v5/pgtype/point.go
new file mode 100644
index 0000000..09b19bb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/point.go
@@ -0,0 +1,266 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Vec2 struct {
+ X float64
+ Y float64
+}
+
+type PointScanner interface {
+ ScanPoint(v Point) error
+}
+
+type PointValuer interface {
+ PointValue() (Point, error)
+}
+
+type Point struct {
+ P Vec2
+ Valid bool
+}
+
+func (p *Point) ScanPoint(v Point) error {
+ *p = v
+ return nil
+}
+
+func (p Point) PointValue() (Point, error) {
+ return p, nil
+}
+
+func parsePoint(src []byte) (*Point, error) {
+ if src == nil || bytes.Equal(src, []byte("null")) {
+ return &Point{}, nil
+ }
+
+ if len(src) < 5 {
+ return nil, fmt.Errorf("invalid length for point: %v", len(src))
+ }
+ if src[0] == '"' && src[len(src)-1] == '"' {
+ src = src[1 : len(src)-1]
+ }
+ sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return nil, fmt.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(sx, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ y, err := strconv.ParseFloat(sy, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Point{P: Vec2{x, y}, Valid: true}, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Point) Scan(src any) error {
+ if src == nil {
+ *dst = Point{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPointScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Point) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := PointCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+func (src Point) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var buff bytes.Buffer
+ buff.WriteByte('"')
+ buff.WriteString(fmt.Sprintf("(%g,%g)", src.P.X, src.P.Y))
+ buff.WriteByte('"')
+ return buff.Bytes(), nil
+}
+
+func (dst *Point) UnmarshalJSON(point []byte) error {
+ p, err := parsePoint(point)
+ if err != nil {
+ return err
+ }
+ *dst = *p
+ return nil
+}
+
+type PointCodec struct{}
+
+func (PointCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PointCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PointCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PointValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPointCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPointCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPointCodecBinary struct{}
+
+func (encodePlanPointCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ point, err := value.(PointValuer).PointValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !point.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(point.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(point.P.Y))
+ return buf, nil
+}
+
+type encodePlanPointCodecText struct{}
+
+func (encodePlanPointCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ point, err := value.(PointValuer).PointValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !point.Valid {
+ return nil, nil
+ }
+
+ return append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(point.P.X, 'f', -1, 64),
+ strconv.FormatFloat(point.P.Y, 'f', -1, 64),
+ )...), nil
+}
+
+func (PointCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PointScanner:
+ return scanPlanBinaryPointToPointScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PointScanner:
+ return scanPlanTextAnyToPointScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c PointCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PointCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var point Point
+ err := codecScan(c, m, oid, format, src, &point)
+ if err != nil {
+ return nil, err
+ }
+ return point, nil
+}
+
+type scanPlanBinaryPointToPointScanner struct{}
+
+func (scanPlanBinaryPointToPointScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PointScanner)
+
+ if src == nil {
+ return scanner.ScanPoint(Point{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for point: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+
+ return scanner.ScanPoint(Point{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPointScanner struct{}
+
+func (scanPlanTextAnyToPointScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PointScanner)
+
+ if src == nil {
+ return scanner.ScanPoint(Point{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for point: %v", len(src))
+ }
+
+ sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return fmt.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(sx, 64)
+ if err != nil {
+ return err
+ }
+
+ y, err := strconv.ParseFloat(sy, 64)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanPoint(Point{P: Vec2{x, y}, Valid: true})
+}
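
Since Point implements PointValuer, PointScanner, and the database/sql interfaces defined above, a text-format round trip can be exercised without a server. A minimal sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	p := pgtype.Point{P: pgtype.Vec2{X: 1.5, Y: 2.5}, Valid: true}

	// Encode with the text plan, as Value() does above.
	buf, err := pgtype.PointCodec{}.PlanEncode(nil, 0, pgtype.TextFormatCode, p).Encode(p, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // (1.5,2.5)

	// Scan the text form back through the database/sql Scanner.
	var out pgtype.Point
	if err := out.Scan(string(buf)); err != nil {
		panic(err)
	}
	fmt.Println(out.P.X, out.P.Y, out.Valid) // 1.5 2.5 true
}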
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go b/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go
new file mode 100644
index 0000000..04b0ba6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/polygon.go
@@ -0,0 +1,253 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type PolygonScanner interface {
+ ScanPolygon(v Polygon) error
+}
+
+type PolygonValuer interface {
+ PolygonValue() (Polygon, error)
+}
+
+type Polygon struct {
+ P []Vec2
+ Valid bool
+}
+
+func (p *Polygon) ScanPolygon(v Polygon) error {
+ *p = v
+ return nil
+}
+
+func (p Polygon) PolygonValue() (Polygon, error) {
+ return p, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (p *Polygon) Scan(src any) error {
+ if src == nil {
+ *p = Polygon{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToPolygonScanner{}.Scan([]byte(src), p)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (p Polygon) Value() (driver.Value, error) {
+ if !p.Valid {
+ return nil, nil
+ }
+
+ buf, err := PolygonCodec{}.PlanEncode(nil, 0, TextFormatCode, p).Encode(p, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), err
+}
+
+type PolygonCodec struct{}
+
+func (PolygonCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (PolygonCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (PolygonCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(PolygonValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanPolygonCodecBinary{}
+ case TextFormatCode:
+ return encodePlanPolygonCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanPolygonCodecBinary struct{}
+
+func (encodePlanPolygonCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ polygon, err := value.(PolygonValuer).PolygonValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !polygon.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(polygon.P)))
+
+ for _, p := range polygon.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+type encodePlanPolygonCodecText struct{}
+
+func (encodePlanPolygonCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ polygon, err := value.(PolygonValuer).PolygonValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !polygon.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, '(')
+
+ for i, p := range polygon.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%s,%s)`,
+ strconv.FormatFloat(p.X, 'f', -1, 64),
+ strconv.FormatFloat(p.Y, 'f', -1, 64),
+ )...)
+ }
+
+ buf = append(buf, ')')
+
+ return buf, nil
+}
+
+func (PolygonCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case PolygonScanner:
+ return scanPlanBinaryPolygonToPolygonScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case PolygonScanner:
+ return scanPlanTextAnyToPolygonScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryPolygonToPolygonScanner struct{}
+
+func (scanPlanBinaryPolygonToPolygonScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PolygonScanner)
+
+ if src == nil {
+ return scanner.ScanPolygon(Polygon{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for polygon: %v", len(src))
+ }
+
+ pointCount := int(binary.BigEndian.Uint32(src))
+ rp := 4
+
+ if 4+pointCount*16 != len(src) {
+ return fmt.Errorf("invalid length for Polygon with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ return scanner.ScanPolygon(Polygon{
+ P: points,
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToPolygonScanner struct{}
+
+func (scanPlanTextAnyToPolygonScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(PolygonScanner)
+
+ if src == nil {
+ return scanner.ScanPolygon(Polygon{})
+ }
+
+ if len(src) < 7 {
+ return fmt.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ return scanner.ScanPolygon(Polygon{P: points, Valid: true})
+}
+
+func (c PolygonCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c PolygonCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var polygon Polygon
+ err := codecScan(c, m, oid, format, src, &polygon)
+ if err != nil {
+ return nil, err
+ }
+ return polygon, nil
+}
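
The polygon text format is a parenthesized list of points, matching what encodePlanPolygonCodecText emits and scanPlanTextAnyToPolygonScanner parses. A minimal round-trip sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	poly := pgtype.Polygon{
		P:     []pgtype.Vec2{{X: 0, Y: 0}, {X: 0, Y: 1}, {X: 1, Y: 0}},
		Valid: true,
	}

	buf, err := pgtype.PolygonCodec{}.PlanEncode(nil, 0, pgtype.TextFormatCode, poly).Encode(poly, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ((0,0),(0,1),(1,0))

	var out pgtype.Polygon
	if err := out.Scan(string(buf)); err != nil {
		panic(err)
	}
	fmt.Println(len(out.P), out.Valid) // 3 true
}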
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go b/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go
new file mode 100644
index 0000000..fc40a5b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/qchar.go
@@ -0,0 +1,141 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "math"
+)
+
+// QCharCodec is for PostgreSQL's special 8-bit-only "char" type, which is more
+// akin to the C language's char type or Go's byte type. (Note that the name in
+// PostgreSQL itself is "char", in double-quotes, and not char.) It is used
+// heavily in PostgreSQL's system tables to hold a single ASCII character value
+// (e.g. pg_class.relkind). It is named QChar, for quoted char, to disambiguate
+// it from the SQL standard char type.
+type QCharCodec struct{}
+
+func (QCharCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (QCharCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (QCharCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case byte:
+ return encodePlanQcharCodecByte{}
+ case rune:
+ return encodePlanQcharCodecRune{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanQcharCodecByte struct{}
+
+func (encodePlanQcharCodecByte) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ b := value.(byte)
+ buf = append(buf, b)
+ return buf, nil
+}
+
+type encodePlanQcharCodecRune struct{}
+
+func (encodePlanQcharCodecRune) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ r := value.(rune)
+ if r > math.MaxUint8 {
+ return nil, fmt.Errorf(`%v cannot be encoded to "char"`, r)
+ }
+ b := byte(r)
+ buf = append(buf, b)
+ return buf, nil
+}
+
+func (QCharCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *byte:
+ return scanPlanQcharCodecByte{}
+ case *rune:
+ return scanPlanQcharCodecRune{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanQcharCodecByte struct{}
+
+func (scanPlanQcharCodecByte) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) > 1 {
+ return fmt.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ b := dst.(*byte)
+ // In the text format the zero value is returned as an empty (zero length) value rather than as a single zero byte
+ if len(src) == 0 {
+ *b = 0
+ } else {
+ *b = src[0]
+ }
+
+ return nil
+}
+
+type scanPlanQcharCodecRune struct{}
+
+func (scanPlanQcharCodecRune) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) > 1 {
+ return fmt.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ r := dst.(*rune)
+ // In the text format the zero value is returned as an empty (zero length) value rather than as a single zero byte
+ if len(src) == 0 {
+ *r = 0
+ } else {
+ *r = rune(src[0])
+ }
+
+ return nil
+}
+
+func (c QCharCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r rune
+ err := codecScan(c, m, oid, format, src, &r)
+ if err != nil {
+ return nil, err
+ }
+ return string(r), nil
+}
+
+func (c QCharCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r rune
+ err := codecScan(c, m, oid, format, src, &r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
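
A minimal sketch of decoding and encoding a "char" value with the plans above (illustrative only; in real use the plans are obtained through a type map rather than called directly):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// Decode a single-byte "char" value such as pg_class.relkind.
	var kind rune
	plan := pgtype.QCharCodec{}.PlanScan(nil, 0, pgtype.TextFormatCode, &kind)
	if err := plan.Scan([]byte("r"), &kind); err != nil {
		panic(err)
	}
	fmt.Printf("%c\n", kind) // r

	// Encode a byte back into the wire form.
	buf, err := pgtype.QCharCodec{}.PlanEncode(nil, 0, pgtype.TextFormatCode, byte('v')).Encode(byte('v'), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // v
}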
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/range.go b/vendor/github.com/jackc/pgx/v5/pgtype/range.go
new file mode 100644
index 0000000..16427cc
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/range.go
@@ -0,0 +1,322 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+)
+
+type BoundType byte
+
+const (
+ Inclusive = BoundType('i')
+ Exclusive = BoundType('e')
+ Unbounded = BoundType('U')
+ Empty = BoundType('E')
+)
+
+func (bt BoundType) String() string {
+ return string(bt)
+}
+
+type untypedTextRange struct {
+ Lower string
+ Upper string
+ LowerType BoundType
+ UpperType BoundType
+}
+
+func parseUntypedTextRange(src string) (*untypedTextRange, error) {
+ utr := &untypedTextRange{}
+ if src == "empty" {
+ utr.LowerType = Empty
+ utr.UpperType = Empty
+ return utr, nil
+ }
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower bound: %w", err)
+ }
+ switch r {
+ case '(':
+ utr.LowerType = Exclusive
+ case '[':
+ utr.LowerType = Inclusive
+ default:
+ return nil, fmt.Errorf("missing lower bound, instead got: %v", string(r))
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower value: %w", err)
+ }
+ buf.UnreadRune()
+
+ if r == ',' {
+ utr.LowerType = Unbounded
+ } else {
+ utr.Lower, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid lower value: %w", err)
+ }
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("missing range separator: %w", err)
+ }
+ if r != ',' {
+ return nil, fmt.Errorf("missing range separator: %v", r)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("invalid upper value: %w", err)
+ }
+
+ if r == ')' || r == ']' {
+ utr.UpperType = Unbounded
+ } else {
+ buf.UnreadRune()
+ utr.Upper, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, fmt.Errorf("invalid upper value: %w", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, fmt.Errorf("missing upper bound: %w", err)
+ }
+ switch r {
+ case ')':
+ utr.UpperType = Exclusive
+ case ']':
+ utr.UpperType = Inclusive
+ default:
+ return nil, fmt.Errorf("missing upper bound, instead got: %v", string(r))
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, fmt.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ return utr, nil
+}
+
+func rangeParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return rangeParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case ',', '[', ']', '(', ')':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func rangeParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r != '"' {
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ }
+ s.WriteRune(r)
+ }
+}
+
+type untypedBinaryRange struct {
+ Lower []byte
+ Upper []byte
+ LowerType BoundType
+ UpperType BoundType
+}
+
+// 0 = () = 00000
+// 1 = empty = 00001
+// 2 = [) = 00010
+// 4 = (] = 00100
+// 6 = [] = 00110
+// 8 = ) = 01000
+// 12 = ] = 01100
+// 16 = ( = 10000
+// 18 = [ = 10010
+// 24 = = 11000
+
+const emptyMask = 1
+const lowerInclusiveMask = 2
+const upperInclusiveMask = 4
+const lowerUnboundedMask = 8
+const upperUnboundedMask = 16
+
+func parseUntypedBinaryRange(src []byte) (*untypedBinaryRange, error) {
+ ubr := &untypedBinaryRange{}
+
+ if len(src) == 0 {
+ return nil, fmt.Errorf("range too short: %v", len(src))
+ }
+
+ rangeType := src[0]
+ rp := 1
+
+ if rangeType&emptyMask > 0 {
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing empty range: %v", len(src[rp:]))
+ }
+ ubr.LowerType = Empty
+ ubr.UpperType = Empty
+ return ubr, nil
+ }
+
+ if rangeType&lowerInclusiveMask > 0 {
+ ubr.LowerType = Inclusive
+ } else if rangeType&lowerUnboundedMask > 0 {
+ ubr.LowerType = Unbounded
+ } else {
+ ubr.LowerType = Exclusive
+ }
+
+ if rangeType&upperInclusiveMask > 0 {
+ ubr.UpperType = Inclusive
+ } else if rangeType&upperUnboundedMask > 0 {
+ ubr.UpperType = Unbounded
+ } else {
+ ubr.UpperType = Exclusive
+ }
+
+ if ubr.LowerType == Unbounded && ubr.UpperType == Unbounded {
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing unbounded range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if len(src[rp:]) < 4 {
+ return nil, fmt.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ val := src[rp : rp+valueLen]
+ rp += valueLen
+
+ if ubr.LowerType != Unbounded {
+ ubr.Lower = val
+ } else {
+ ubr.Upper = val
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if ubr.UpperType != Unbounded {
+ if len(src[rp:]) < 4 {
+ return nil, fmt.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ ubr.Upper = src[rp : rp+valueLen]
+ rp += valueLen
+ }
+
+ if len(src[rp:]) > 0 {
+ return nil, fmt.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+
+ return ubr, nil
+}
+
+// Range is a generic range type.
+type Range[T any] struct {
+ Lower T
+ Upper T
+ LowerType BoundType
+ UpperType BoundType
+ Valid bool
+}
+
+func (r Range[T]) IsNull() bool {
+ return !r.Valid
+}
+
+func (r Range[T]) BoundTypes() (lower, upper BoundType) {
+ return r.LowerType, r.UpperType
+}
+
+func (r Range[T]) Bounds() (lower, upper any) {
+ return &r.Lower, &r.Upper
+}
+
+func (r *Range[T]) ScanNull() error {
+ *r = Range[T]{}
+ return nil
+}
+
+func (r *Range[T]) ScanBounds() (lowerTarget, upperTarget any) {
+ return &r.Lower, &r.Upper
+}
+
+func (r *Range[T]) SetBoundTypes(lower, upper BoundType) error {
+ if lower == Unbounded || lower == Empty {
+ var zero T
+ r.Lower = zero
+ }
+ if upper == Unbounded || upper == Empty {
+ var zero T
+ r.Upper = zero
+ }
+ r.LowerType = lower
+ r.UpperType = upper
+ r.Valid = true
+ return nil
+}
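
A minimal sketch of building a range value with the generic type above (illustrative only; pgtype.Int4 is assumed from elsewhere in this package):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// [1,10): inclusive lower bound, exclusive upper bound.
	r := pgtype.Range[pgtype.Int4]{
		Lower:     pgtype.Int4{Int32: 1, Valid: true},
		Upper:     pgtype.Int4{Int32: 10, Valid: true},
		LowerType: pgtype.Inclusive,
		UpperType: pgtype.Exclusive,
		Valid:     true,
	}

	lower, upper := r.BoundTypes()
	fmt.Println(lower, upper, r.IsNull()) // i e false
}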
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go
new file mode 100644
index 0000000..684f1bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go
@@ -0,0 +1,379 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+// RangeValuer is a type that can be converted into a PostgreSQL range.
+type RangeValuer interface {
+ // IsNull returns true if the value is SQL NULL.
+ IsNull() bool
+
+ // BoundTypes returns the lower and upper bound types.
+ BoundTypes() (lower, upper BoundType)
+
+ // Bounds returns the lower and upper range values.
+ Bounds() (lower, upper any)
+}
+
+// RangeScanner is a type that can be scanned from a PostgreSQL range.
+type RangeScanner interface {
+ // ScanNull sets the value to SQL NULL.
+ ScanNull() error
+
+ // ScanBounds returns values usable as a scan target. The returned values may not be scanned if the range is empty or
+ // the bound type is unbounded.
+ ScanBounds() (lowerTarget, upperTarget any)
+
+ // SetBoundTypes sets the lower and upper bound types. ScanBounds will be called and the returned values scanned
+ // (if appropriate) before SetBoundTypes is called. If the bound types are unbounded or empty this method must
+ // also set the bound values.
+ SetBoundTypes(lower, upper BoundType) error
+}
+
+// RangeCodec is a codec for any range type.
+type RangeCodec struct {
+ ElementType *Type
+}
+
+func (c *RangeCodec) FormatSupported(format int16) bool {
+ return c.ElementType.Codec.FormatSupported(format)
+}
+
+func (c *RangeCodec) PreferredFormat() int16 {
+ if c.FormatSupported(BinaryFormatCode) {
+ return BinaryFormatCode
+ }
+ return TextFormatCode
+}
+
+func (c *RangeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(RangeValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return &encodePlanRangeCodecRangeValuerToBinary{rc: c, m: m}
+ case TextFormatCode:
+ return &encodePlanRangeCodecRangeValuerToText{rc: c, m: m}
+ }
+
+ return nil
+}
+
+type encodePlanRangeCodecRangeValuerToBinary struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(RangeValuer)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ lowerType, upperType := getter.BoundTypes()
+ lower, upper := getter.Bounds()
+
+ var rangeType byte
+ switch lowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, fmt.Errorf("unknown LowerType: %v", lowerType)
+ }
+
+ switch upperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, fmt.Errorf("unknown UpperType: %v", upperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ if lowerType != Unbounded {
+ if lower == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ lowerPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, BinaryFormatCode, lower)
+ if lowerPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", lower)
+ }
+
+ buf, err = lowerPlan.Encode(lower, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if upperType != Unbounded {
+ if upper == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ upperPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, BinaryFormatCode, upper)
+ if upperPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", upper)
+ }
+
+ buf, err = upperPlan.Encode(upper, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+type encodePlanRangeCodecRangeValuerToText struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ getter := value.(RangeValuer)
+
+ if getter.IsNull() {
+ return nil, nil
+ }
+
+ lowerType, upperType := getter.BoundTypes()
+ lower, upper := getter.Bounds()
+
+ switch lowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, fmt.Errorf("unknown lower bound type %v", lowerType)
+ }
+
+ if lowerType != Unbounded {
+ if lower == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+
+ lowerPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, lower)
+ if lowerPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", lower)
+ }
+
+ buf, err = lowerPlan.Encode(lower, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if upperType != Unbounded {
+ if upper == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+
+ upperPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, upper)
+ if upperPlan == nil {
+ return nil, fmt.Errorf("cannot encode %v as element of range", upper)
+ }
+
+ buf, err = upperPlan.Encode(upper, buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
+ }
+ if buf == nil {
+ return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ }
+ }
+
+ switch upperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, fmt.Errorf("unknown upper bound type %v", upperType)
+ }
+
+ return buf, nil
+}
+
+func (c *RangeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case RangeScanner:
+ return &scanPlanBinaryRangeToRangeScanner{rc: c, m: m}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case RangeScanner:
+ return &scanPlanTextRangeToRangeScanner{rc: c, m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryRangeToRangeScanner struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *scanPlanBinaryRangeToRangeScanner) Scan(src []byte, target any) error {
+ rangeScanner := (target).(RangeScanner)
+
+ if src == nil {
+ return rangeScanner.ScanNull()
+ }
+
+ ubr, err := parseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ if ubr.LowerType == Empty {
+ return rangeScanner.SetBoundTypes(ubr.LowerType, ubr.UpperType)
+ }
+
+ lowerTarget, upperTarget := rangeScanner.ScanBounds()
+
+ if ubr.LowerType == Inclusive || ubr.LowerType == Exclusive {
+ lowerPlan := plan.m.PlanScan(plan.rc.ElementType.OID, BinaryFormatCode, lowerTarget)
+ if lowerPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", lowerTarget)
+ }
+
+ err = lowerPlan.Scan(ubr.Lower, lowerTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
+ }
+ }
+
+ if ubr.UpperType == Inclusive || ubr.UpperType == Exclusive {
+ upperPlan := plan.m.PlanScan(plan.rc.ElementType.OID, BinaryFormatCode, upperTarget)
+ if upperPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", upperTarget)
+ }
+
+ err = upperPlan.Scan(ubr.Upper, upperTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
+ }
+ }
+
+ return rangeScanner.SetBoundTypes(ubr.LowerType, ubr.UpperType)
+}
+
+type scanPlanTextRangeToRangeScanner struct {
+ rc *RangeCodec
+ m *Map
+}
+
+func (plan *scanPlanTextRangeToRangeScanner) Scan(src []byte, target any) error {
+ rangeScanner := (target).(RangeScanner)
+
+ if src == nil {
+ return rangeScanner.ScanNull()
+ }
+
+ utr, err := parseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ if utr.LowerType == Empty {
+ return rangeScanner.SetBoundTypes(utr.LowerType, utr.UpperType)
+ }
+
+ lowerTarget, upperTarget := rangeScanner.ScanBounds()
+
+ if utr.LowerType == Inclusive || utr.LowerType == Exclusive {
+ lowerPlan := plan.m.PlanScan(plan.rc.ElementType.OID, TextFormatCode, lowerTarget)
+ if lowerPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", lowerTarget)
+ }
+
+ err = lowerPlan.Scan([]byte(utr.Lower), lowerTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
+ }
+ }
+
+ if utr.UpperType == Inclusive || utr.UpperType == Exclusive {
+ upperPlan := plan.m.PlanScan(plan.rc.ElementType.OID, TextFormatCode, upperTarget)
+ if upperPlan == nil {
+ return fmt.Errorf("cannot scan into %v from range element", upperTarget)
+ }
+
+ err = upperPlan.Scan([]byte(utr.Upper), upperTarget)
+ if err != nil {
+ return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
+ }
+ }
+
+ return rangeScanner.SetBoundTypes(utr.LowerType, utr.UpperType)
+}
+
+func (c *RangeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (c *RangeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var r Range[any]
+ err := c.PlanScan(m, oid, format, &r).Scan(src, &r)
+ return r, err
+}
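
RangeCodec delegates bound encoding and scanning to the element type's plans, so a full round trip needs a type map. A minimal text-format sketch (illustrative only; pgtype.NewMap, Map.Encode, Map.Scan, pgtype.Int4 and the Int4rangeOID constant are assumed from elsewhere in this package):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	m := pgtype.NewMap() // int4range is registered with a RangeCodec by default

	r := pgtype.Range[pgtype.Int4]{
		Lower:     pgtype.Int4{Int32: 1, Valid: true},
		Upper:     pgtype.Int4{Int32: 10, Valid: true},
		LowerType: pgtype.Inclusive,
		UpperType: pgtype.Exclusive,
		Valid:     true,
	}

	buf, err := m.Encode(pgtype.Int4rangeOID, pgtype.TextFormatCode, r, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // [1,10)

	var out pgtype.Range[pgtype.Int4]
	if err := m.Scan(pgtype.Int4rangeOID, pgtype.TextFormatCode, buf, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Lower.Int32, out.Upper.Int32) // 1 10
}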
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go
new file mode 100644
index 0000000..b3b1660
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/record_codec.go
@@ -0,0 +1,125 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// RecordCodec is a codec for the generic PostgreSQL record type such as is created with the "row" function. Record can
+// only decode the binary format. The text format output from PostgreSQL does not include type information and is
+// therefore impossible to decode. Encoding is impossible because PostgreSQL does not support input of generic
+// records.
+type RecordCodec struct{}
+
+func (RecordCodec) FormatSupported(format int16) bool {
+ return format == BinaryFormatCode
+}
+
+func (RecordCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (RecordCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ return nil
+}
+
+func (RecordCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ if format == BinaryFormatCode {
+ switch target.(type) {
+ case CompositeIndexScanner:
+ return &scanPlanBinaryRecordToCompositeIndexScanner{m: m}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryRecordToCompositeIndexScanner struct {
+ m *Map
+}
+
+func (plan *scanPlanBinaryRecordToCompositeIndexScanner) Scan(src []byte, target any) error {
+ targetScanner := (target).(CompositeIndexScanner)
+
+ if src == nil {
+ return targetScanner.ScanNull()
+ }
+
+ scanner := NewCompositeBinaryScanner(plan.m, src)
+ for i := 0; scanner.Next(); i++ {
+ fieldTarget := targetScanner.ScanIndex(i)
+ if fieldTarget != nil {
+ fieldPlan := plan.m.PlanScan(scanner.OID(), BinaryFormatCode, fieldTarget)
+ if fieldPlan == nil {
+ return fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), fieldTarget)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (RecordCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ return buf, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
+
+func (RecordCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch format {
+ case TextFormatCode:
+ return string(src), nil
+ case BinaryFormatCode:
+ scanner := NewCompositeBinaryScanner(m, src)
+ values := make([]any, scanner.FieldCount())
+ for i := 0; scanner.Next(); i++ {
+ var v any
+ fieldPlan := m.PlanScan(scanner.OID(), BinaryFormatCode, &v)
+ if fieldPlan == nil {
+ return nil, fmt.Errorf("unable to scan OID %d in binary format into %v", scanner.OID(), v)
+ }
+
+ err := fieldPlan.Scan(scanner.Bytes(), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ values[i] = v
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return values, nil
+ default:
+ return nil, fmt.Errorf("unknown format code %d", format)
+ }
+}
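
Because RecordCodec only understands the binary format, a self-contained demonstration has to hand-build the wire form: an int32 field count, then per field a uint32 type OID, an int32 length, and the value bytes. A minimal sketch (illustrative only; pgtype.NewMap and the Int4OID/RecordOID constants are assumed from elsewhere in this package):

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// Binary wire form of row(7::int4).
	var src []byte
	src = binary.BigEndian.AppendUint32(src, 1)              // one field
	src = binary.BigEndian.AppendUint32(src, pgtype.Int4OID) // field type OID
	src = binary.BigEndian.AppendUint32(src, 4)              // field length
	src = binary.BigEndian.AppendUint32(src, 7)              // int4 value

	m := pgtype.NewMap()
	v, err := pgtype.RecordCodec{}.DecodeValue(m, pgtype.RecordOID, pgtype.BinaryFormatCode, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.([]any)[0]) // 7
}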
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go
new file mode 100644
index 0000000..be1ca4a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types.go
@@ -0,0 +1,35 @@
+//go:build !nopgxregisterdefaulttypes
+
+package pgtype
+
+func registerDefaultPgTypeVariants[T any](m *Map, name string) {
+ arrayName := "_" + name
+
+ var value T
+ m.RegisterDefaultPgType(value, name) // T
+ m.RegisterDefaultPgType(&value, name) // *T
+
+ var sliceT []T
+ m.RegisterDefaultPgType(sliceT, arrayName) // []T
+ m.RegisterDefaultPgType(&sliceT, arrayName) // *[]T
+
+ var slicePtrT []*T
+ m.RegisterDefaultPgType(slicePtrT, arrayName) // []*T
+ m.RegisterDefaultPgType(&slicePtrT, arrayName) // *[]*T
+
+ var arrayOfT Array[T]
+ m.RegisterDefaultPgType(arrayOfT, arrayName) // Array[T]
+ m.RegisterDefaultPgType(&arrayOfT, arrayName) // *Array[T]
+
+ var arrayOfPtrT Array[*T]
+ m.RegisterDefaultPgType(arrayOfPtrT, arrayName) // Array[*T]
+ m.RegisterDefaultPgType(&arrayOfPtrT, arrayName) // *Array[*T]
+
+ var flatArrayOfT FlatArray[T]
+ m.RegisterDefaultPgType(flatArrayOfT, arrayName) // FlatArray[T]
+ m.RegisterDefaultPgType(&flatArrayOfT, arrayName) // *FlatArray[T]
+
+ var flatArrayOfPtrT FlatArray[*T]
+ m.RegisterDefaultPgType(flatArrayOfPtrT, arrayName) // FlatArray[*T]
+ m.RegisterDefaultPgType(&flatArrayOfPtrT, arrayName) // *FlatArray[*T]
+}
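
registerDefaultPgTypeVariants is unexported, but user code can apply the same pattern through Map.RegisterDefaultPgType to make a custom Go type encode as a known PostgreSQL type. A minimal sketch (illustrative only; pgtype.NewMap, Map.RegisterDefaultPgType and Map.TypeForValue are assumed from elsewhere in this package, and myID is a hypothetical user-defined type):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

// myID is a hypothetical user-defined alias type.
type myID int64

func main() {
	m := pgtype.NewMap()

	// Register the value, pointer, and slice variants, as the helper above does.
	m.RegisterDefaultPgType(myID(0), "int8")
	m.RegisterDefaultPgType((*myID)(nil), "int8")
	m.RegisterDefaultPgType([]myID(nil), "_int8")

	if t, ok := m.TypeForValue(myID(42)); ok {
		fmt.Println(t.Name) // int8
	}
}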
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go
new file mode 100644
index 0000000..56fe7c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/register_default_pg_types_disabled.go
@@ -0,0 +1,6 @@
+//go:build nopgxregisterdefaulttypes
+
+package pgtype
+
+func registerDefaultPgTypeVariants[T any](m *Map, name string) {
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/text.go b/vendor/github.com/jackc/pgx/v5/pgtype/text.go
new file mode 100644
index 0000000..021ee33
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/text.go
@@ -0,0 +1,223 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+type TextScanner interface {
+ ScanText(v Text) error
+}
+
+type TextValuer interface {
+ TextValue() (Text, error)
+}
+
+type Text struct {
+ String string
+ Valid bool
+}
+
+func (t *Text) ScanText(v Text) error {
+ *t = v
+ return nil
+}
+
+func (t Text) TextValue() (Text, error) {
+ return t, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Text) Scan(src any) error {
+ if src == nil {
+ *dst = Text{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ *dst = Text{String: src, Valid: true}
+ return nil
+ case []byte:
+ *dst = Text{String: string(src), Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Text) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return src.String, nil
+}
+
+func (src Text) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ return json.Marshal(src.String)
+}
+
+func (dst *Text) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *dst = Text{}
+ } else {
+ *dst = Text{String: *s, Valid: true}
+ }
+
+ return nil
+}
+
+type TextCodec struct{}
+
+func (TextCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TextCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (TextCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch value.(type) {
+ case string:
+ return encodePlanTextCodecString{}
+ case []byte:
+ return encodePlanTextCodecByteSlice{}
+ case TextValuer:
+ return encodePlanTextCodecTextValuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanTextCodecString struct{}
+
+func (encodePlanTextCodecString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(string)
+ buf = append(buf, s...)
+ return buf, nil
+}
+
+type encodePlanTextCodecByteSlice struct{}
+
+func (encodePlanTextCodecByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.([]byte)
+ buf = append(buf, s...)
+ return buf, nil
+}
+
+type encodePlanTextCodecStringer struct{}
+
+func (encodePlanTextCodecStringer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ s := value.(fmt.Stringer)
+ buf = append(buf, s.String()...)
+ return buf, nil
+}
+
+type encodePlanTextCodecTextValuer struct{}
+
+func (encodePlanTextCodecTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ text, err := value.(TextValuer).TextValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !text.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, text.String...)
+ return buf, nil
+}
+
+func (TextCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case TextFormatCode, BinaryFormatCode:
+ switch target.(type) {
+ case *string:
+ return scanPlanTextAnyToString{}
+ case *[]byte:
+ return scanPlanAnyToNewByteSlice{}
+ case BytesScanner:
+ return scanPlanAnyToByteScanner{}
+ case TextScanner:
+ return scanPlanTextAnyToTextScanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c TextCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return c.DecodeValue(m, oid, format, src)
+}
+
+func (c TextCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ return string(src), nil
+}
+
+type scanPlanTextAnyToString struct{}
+
+func (scanPlanTextAnyToString) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ p := (dst).(*string)
+ *p = string(src)
+
+ return nil
+}
+
+type scanPlanAnyToNewByteSlice struct{}
+
+func (scanPlanAnyToNewByteSlice) Scan(src []byte, dst any) error {
+ p := (dst).(*[]byte)
+ if src == nil {
+ *p = nil
+ } else {
+ *p = make([]byte, len(src))
+ copy(*p, src)
+ }
+
+ return nil
+}
+
+type scanPlanAnyToByteScanner struct{}
+
+func (scanPlanAnyToByteScanner) Scan(src []byte, dst any) error {
+ p := (dst).(BytesScanner)
+ return p.ScanBytes(src)
+}
+
+type scanPlanTextAnyToTextScanner struct{}
+
+func (scanPlanTextAnyToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ return scanner.ScanText(Text{String: string(src), Valid: true})
+}
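
Text carries both JSON and database/sql behavior, with Valid distinguishing SQL NULL from an empty string. A minimal sketch (illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	t := pgtype.Text{String: "hello", Valid: true}
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // "hello"

	b, _ = json.Marshal(pgtype.Text{}) // invalid value marshals as SQL NULL
	fmt.Println(string(b))             // null

	// database/sql round trip through Scanner and Valuer.
	var out pgtype.Text
	if err := out.Scan("world"); err != nil {
		panic(err)
	}
	v, _ := out.Value()
	fmt.Println(v) // world
}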
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go b/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go
new file mode 100644
index 0000000..d5e4cdb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/text_format_only_codec.go
@@ -0,0 +1,13 @@
+package pgtype
+
+type TextFormatOnlyCodec struct {
+ Codec
+}
+
+func (c *TextFormatOnlyCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode && c.Codec.FormatSupported(format)
+}
+
+func (TextFormatOnlyCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
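
TextFormatOnlyCodec simply narrows an embedded codec to the text wire format. A minimal sketch (illustrative only; TextCodec comes from text.go above):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	c := &pgtype.TextFormatOnlyCodec{Codec: pgtype.TextCodec{}}
	fmt.Println(c.FormatSupported(pgtype.TextFormatCode))     // true
	fmt.Println(c.FormatSupported(pgtype.BinaryFormatCode))   // false
	fmt.Println(c.PreferredFormat() == pgtype.TextFormatCode) // true
}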
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/tid.go b/vendor/github.com/jackc/pgx/v5/pgtype/tid.go
new file mode 100644
index 0000000..9bc2c2a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/tid.go
@@ -0,0 +1,241 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type TIDScanner interface {
+ ScanTID(v TID) error
+}
+
+type TIDValuer interface {
+ TIDValue() (TID, error)
+}
+
+// TID is PostgreSQL's Tuple Identifier type.
+//
+// When one does
+//
+// select ctid, * from some_table;
+//
+// it is the data type of the ctid hidden system column.
+//
+// It is currently implemented as a pair of unsigned integers: a four byte
+// block number and a two byte offset number.
+// Its conversion functions can be found in src/backend/utils/adt/tid.c
+// in the PostgreSQL sources.
+type TID struct {
+ BlockNumber uint32
+ OffsetNumber uint16
+ Valid bool
+}
+
+func (b *TID) ScanTID(v TID) error {
+ *b = v
+ return nil
+}
+
+func (b TID) TIDValue() (TID, error) {
+ return b, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TID) Scan(src any) error {
+ if src == nil {
+ *dst = TID{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return scanPlanTextAnyToTIDScanner{}.Scan([]byte(src), dst)
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src TID) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ buf, err := TIDCodec{}.PlanEncode(nil, 0, TextFormatCode, src).Encode(src, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type TIDCodec struct{}
+
+func (TIDCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TIDCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (TIDCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TIDValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTIDCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTIDCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTIDCodecBinary struct{}
+
+func (encodePlanTIDCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ tid, err := value.(TIDValuer).TIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !tid.Valid {
+ return nil, nil
+ }
+
+ buf = pgio.AppendUint32(buf, tid.BlockNumber)
+ buf = pgio.AppendUint16(buf, tid.OffsetNumber)
+ return buf, nil
+}
+
+type encodePlanTIDCodecText struct{}
+
+func (encodePlanTIDCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ tid, err := value.(TIDValuer).TIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !tid.Valid {
+ return nil, nil
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%d,%d)`, tid.BlockNumber, tid.OffsetNumber)...)
+ return buf, nil
+}
+
+func (TIDCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TIDScanner:
+ return scanPlanBinaryTIDToTIDScanner{}
+ case TextScanner:
+ return scanPlanBinaryTIDToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TIDScanner:
+ return scanPlanTextAnyToTIDScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTIDToTIDScanner struct{}
+
+func (scanPlanBinaryTIDToTIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TIDScanner)
+
+ if src == nil {
+ return scanner.ScanTID(TID{})
+ }
+
+ if len(src) != 6 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ return scanner.ScanTID(TID{
+ BlockNumber: binary.BigEndian.Uint32(src),
+ OffsetNumber: binary.BigEndian.Uint16(src[4:]),
+ Valid: true,
+ })
+}
+
+type scanPlanBinaryTIDToTextScanner struct{}
+
+func (scanPlanBinaryTIDToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ if len(src) != 6 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ blockNumber := binary.BigEndian.Uint32(src)
+ offsetNumber := binary.BigEndian.Uint16(src[4:])
+
+ return scanner.ScanText(Text{
+ String: fmt.Sprintf(`(%d,%d)`, blockNumber, offsetNumber),
+ Valid: true,
+ })
+}
+
+type scanPlanTextAnyToTIDScanner struct{}
+
+func (scanPlanTextAnyToTIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TIDScanner)
+
+ if src == nil {
+ return scanner.ScanTID(TID{})
+ }
+
+ if len(src) < 5 {
+ return fmt.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ block, offset, found := strings.Cut(string(src[1:len(src)-1]), ",")
+ if !found {
+ return fmt.Errorf("invalid format for tid")
+ }
+
+ blockNumber, err := strconv.ParseUint(block, 10, 32)
+ if err != nil {
+ return err
+ }
+
+ offsetNumber, err := strconv.ParseUint(offset, 10, 16)
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanTID(TID{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber), Valid: true})
+}
+
+func (c TIDCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c TIDCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tid TID
+ err := codecScan(c, m, oid, format, src, &tid)
+ if err != nil {
+ return nil, err
+ }
+ return tid, nil
+}
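
The tid text format is (blockNumber,offsetNumber), matching ctid values such as (42,7). A minimal round-trip sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	tid := pgtype.TID{BlockNumber: 42, OffsetNumber: 7, Valid: true}

	buf, err := pgtype.TIDCodec{}.PlanEncode(nil, 0, pgtype.TextFormatCode, tid).Encode(tid, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // (42,7)

	var out pgtype.TID
	if err := out.Scan(string(buf)); err != nil {
		panic(err)
	}
	fmt.Println(out.BlockNumber, out.OffsetNumber) // 42 7
}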
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/time.go b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
new file mode 100644
index 0000000..61a3abd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
@@ -0,0 +1,272 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type TimeScanner interface {
+ ScanTime(v Time) error
+}
+
+type TimeValuer interface {
+ TimeValue() (Time, error)
+}
+
+// Time represents the PostgreSQL time type. The PostgreSQL time is a time of day without time zone.
+//
+// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time
+// and date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time cannot, because
+// it must be able to represent 24:00:00, which time.Time would convert to 00:00:00 on the following day.
+type Time struct {
+ Microseconds int64 // Number of microseconds since midnight
+ Valid bool
+}
+
+func (t *Time) ScanTime(v Time) error {
+ *t = v
+ return nil
+}
+
+func (t Time) TimeValue() (Time, error) {
+ return t, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (t *Time) Scan(src any) error {
+ if src == nil {
+ *t = Time{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ err := scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t)
+ if err != nil {
+ t.Microseconds = 0
+ t.Valid = false
+ }
+ return err
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (t Time) Value() (driver.Value, error) {
+ if !t.Valid {
+ return nil, nil
+ }
+
+ buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, t).Encode(t, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), err
+}
+
+type TimeCodec struct{}
+
+func (TimeCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (TimeCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (TimeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimeValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimeCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimeCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimeCodecBinary struct{}
+
+func (encodePlanTimeCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TimeValuer).TimeValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !t.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendInt64(buf, t.Microseconds), nil
+}
+
+type encodePlanTimeCodecText struct{}
+
+func (encodePlanTimeCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ t, err := value.(TimeValuer).TimeValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !t.Valid {
+ return nil, nil
+ }
+
+ usec := t.Microseconds
+ hours := usec / microsecondsPerHour
+ usec -= hours * microsecondsPerHour
+ minutes := usec / microsecondsPerMinute
+ usec -= minutes * microsecondsPerMinute
+ seconds := usec / microsecondsPerSecond
+ usec -= seconds * microsecondsPerSecond
+
+ s := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, usec)
+
+ return append(buf, s...), nil
+}
+
+func (TimeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimeScanner:
+ return scanPlanBinaryTimeToTimeScanner{}
+ case TextScanner:
+ return scanPlanBinaryTimeToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimeScanner:
+ return scanPlanTextAnyToTimeScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimeToTimeScanner struct{}
+
+func (scanPlanBinaryTimeToTimeScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimeScanner)
+
+ if src == nil {
+ return scanner.ScanTime(Time{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for time: %v", len(src))
+ }
+
+ usec := int64(binary.BigEndian.Uint64(src))
+
+ return scanner.ScanTime(Time{Microseconds: usec, Valid: true})
+}
+
+type scanPlanBinaryTimeToTextScanner struct{}
+
+func (scanPlanBinaryTimeToTextScanner) Scan(src []byte, dst any) error {
+ ts, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return ts.ScanText(Text{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for time: %v", len(src))
+ }
+
+ usec := int64(binary.BigEndian.Uint64(src))
+
+ tim := Time{Microseconds: usec, Valid: true}
+
+ buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, tim).Encode(tim, nil)
+ if err != nil {
+ return err
+ }
+
+ return ts.ScanText(Text{String: string(buf), Valid: true})
+}
+
+type scanPlanTextAnyToTimeScanner struct{}
+
+func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimeScanner)
+
+ if src == nil {
+ return scanner.ScanTime(Time{})
+ }
+
+ s := string(src)
+
+ if len(s) < 8 || s[2] != ':' || s[5] != ':' {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ hours, err := strconv.ParseInt(s[0:2], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec := hours * microsecondsPerHour
+
+ minutes, err := strconv.ParseInt(s[3:5], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec += minutes * microsecondsPerMinute
+
+ seconds, err := strconv.ParseInt(s[6:8], 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+ usec += seconds * microsecondsPerSecond
+
+ if len(s) > 9 {
+ if s[8] != '.' || len(s) > 15 {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ fraction := s[9:]
+ n, err := strconv.ParseInt(fraction, 10, 64)
+ if err != nil {
+ return fmt.Errorf("cannot decode %v into Time", s)
+ }
+
+ for i := len(fraction); i < 6; i++ {
+ n *= 10
+ }
+
+ usec += n
+ }
+
+ return scanner.ScanTime(Time{Microseconds: usec, Valid: true})
+}
+
+func (c TimeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ return codecDecodeToTextFormat(c, m, oid, format, src)
+}
+
+func (c TimeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var t Time
+ err := codecScan(c, m, oid, format, src, &t)
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
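
Storing microseconds since midnight lets Time represent 24:00:00, which time.Time cannot. A minimal sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	// 13:30:05.000250 as microseconds since midnight.
	t := pgtype.Time{
		Microseconds: int64((13*3600+30*60+5)*1_000_000 + 250),
		Valid:        true,
	}

	buf, err := pgtype.TimeCodec{}.PlanEncode(nil, 0, pgtype.TextFormatCode, t).Encode(t, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // 13:30:05.000250

	// 24:00:00 is representable here, unlike in time.Time.
	var out pgtype.Time
	if err := out.Scan("24:00:00"); err != nil {
		panic(err)
	}
	fmt.Println(out.Microseconds) // 86400000000
}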
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go
new file mode 100644
index 0000000..677a2c6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamp.go
@@ -0,0 +1,356 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const pgTimestampFormat = "2006-01-02 15:04:05.999999999"
+
+type TimestampScanner interface {
+ ScanTimestamp(v Timestamp) error
+}
+
+type TimestampValuer interface {
+ TimestampValue() (Timestamp, error)
+}
+
+// Timestamp represents the PostgreSQL timestamp type.
+type Timestamp struct {
+ Time time.Time // Time zone will be ignored when encoding to PostgreSQL.
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (ts *Timestamp) ScanTimestamp(v Timestamp) error {
+ *ts = v
+ return nil
+}
+
+func (ts Timestamp) TimestampValue() (Timestamp, error) {
+ return ts, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (ts *Timestamp) Scan(src any) error {
+ if src == nil {
+ *ts = Timestamp{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return (&scanPlanTextTimestampToTimestampScanner{}).Scan([]byte(src), ts)
+ case time.Time:
+ *ts = Timestamp{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (ts Timestamp) Value() (driver.Value, error) {
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier.String(), nil
+ }
+ return ts.Time, nil
+}
+
+func (ts Timestamp) MarshalJSON() ([]byte, error) {
+ if !ts.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+ s = ts.Time.Format(time.RFC3339Nano)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (ts *Timestamp) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *ts = Timestamp{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *ts = Timestamp{Valid: true, InfinityModifier: -Infinity}
+ default:
+ // PostgreSQL uses ISO 8601 for the to_json function and when casting from a string to timestamptz
+ tim, err := time.Parse(time.RFC3339Nano, *s)
+ if err != nil {
+ return err
+ }
+
+ *ts = Timestamp{Time: tim, Valid: true}
+ }
+
+ return nil
+}
+
+type TimestampCodec struct {
+ // ScanLocation is the location that the time is assumed to be in for scanning. This is different from
+ // TimestamptzCodec.ScanLocation in that this setting does change the instant in time that the timestamp represents.
+ ScanLocation *time.Location
+}
+
+func (*TimestampCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*TimestampCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (*TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimestampValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimestampCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimestampCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimestampCodecBinary struct{}
+
+func (encodePlanTimestampCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestampValuer).TimestampValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var microsecSinceY2K int64
+ switch ts.InfinityModifier {
+ case Finite:
+ t := discardTimeZone(ts.Time)
+ microsecSinceUnixEpoch := t.Unix()*1000000 + int64(t.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ buf = pgio.AppendInt64(buf, microsecSinceY2K)
+
+ return buf, nil
+}
+
+type encodePlanTimestampCodecText struct{}
+
+func (encodePlanTimestampCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestampValuer).TimestampValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+ t := discardTimeZone(ts.Time)
+
+ // Year 0000 is 1 BC
+ bc := false
+ if year := t.Year(); year <= 0 {
+ year = -year + 1
+ t = time.Date(year, t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ bc = true
+ }
+
+ s = t.Truncate(time.Microsecond).Format(pgTimestampFormat)
+
+ if bc {
+ s = s + " BC"
+ }
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ buf = append(buf, s...)
+
+ return buf, nil
+}
+
+func discardTimeZone(t time.Time) time.Time {
+ if t.Location() != time.UTC {
+ return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ }
+
+ return t
+}
+
+func (c *TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimestampScanner:
+ return &scanPlanBinaryTimestampToTimestampScanner{location: c.ScanLocation}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimestampScanner:
+ return &scanPlanTextTimestampToTimestampScanner{location: c.ScanLocation}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimestampToTimestampScanner struct{ location *time.Location }
+
+func (plan *scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestampScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamp(Timestamp{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for timestamp: %v", len(src))
+ }
+
+ var ts Timestamp
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ ts = Timestamp{Valid: true, InfinityModifier: NegativeInfinity}
+ default:
+ tim := time.Unix(
+ microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
+ (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
+ ).UTC()
+ if plan.location != nil {
+ tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
+ }
+ ts = Timestamp{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamp(ts)
+}
+
+type scanPlanTextTimestampToTimestampScanner struct{ location *time.Location }
+
+func (plan *scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestampScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamp(Timestamp{})
+ }
+
+ var ts Timestamp
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ ts = Timestamp{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ ts = Timestamp{Valid: true, InfinityModifier: NegativeInfinity}
+ default:
+ bc := false
+ if strings.HasSuffix(sbuf, " BC") {
+ sbuf = sbuf[:len(sbuf)-3]
+ bc = true
+ }
+ tim, err := time.Parse(pgTimestampFormat, sbuf)
+ if err != nil {
+ return err
+ }
+
+ if bc {
+ year := -tim.Year() + 1
+ tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
+ }
+
+ if plan.location != nil {
+ tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
+ }
+
+ ts = Timestamp{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamp(ts)
+}
+
+func (c *TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var ts Timestamp
+ err := codecScan(c, m, oid, format, src, &ts)
+ if err != nil {
+ return nil, err
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier.String(), nil
+ }
+
+ return ts.Time, nil
+}
+
+func (c *TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var ts Timestamp
+ err := codecScan(c, m, oid, format, src, &ts)
+ if err != nil {
+ return nil, err
+ }
+
+ if ts.InfinityModifier != Finite {
+ return ts.InfinityModifier, nil
+ }
+
+ return ts.Time, nil
+}
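A quick usage sketch of the codec above: PostgreSQL's special timestamp values surface in the InfinityModifier field rather than in Time. This is a minimal sketch, assuming an established *pgx.Conn (conn), a context (ctx), and the usual imports:

    var ts pgtype.Timestamp
    // 'infinity'::timestamp scans with InfinityModifier set; ts.Time stays zero.
    if err := conn.QueryRow(ctx, "select 'infinity'::timestamp").Scan(&ts); err != nil {
        return err
    }
    fmt.Println(ts.Valid, ts.InfinityModifier == pgtype.Infinity) // true true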
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go
new file mode 100644
index 0000000..7efbcff
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/timestamptz.go
@@ -0,0 +1,366 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+const pgTimestamptzHourFormat = "2006-01-02 15:04:05.999999999Z07"
+const pgTimestamptzMinuteFormat = "2006-01-02 15:04:05.999999999Z07:00"
+const pgTimestamptzSecondFormat = "2006-01-02 15:04:05.999999999Z07:00:00"
+const microsecFromUnixEpochToY2K = 946684800 * 1000000
+
+const (
+ negativeInfinityMicrosecondOffset = -9223372036854775808
+ infinityMicrosecondOffset = 9223372036854775807
+)
+
+type TimestamptzScanner interface {
+ ScanTimestamptz(v Timestamptz) error
+}
+
+type TimestamptzValuer interface {
+ TimestamptzValue() (Timestamptz, error)
+}
+
+// Timestamptz represents the PostgreSQL timestamptz type.
+type Timestamptz struct {
+ Time time.Time
+ InfinityModifier InfinityModifier
+ Valid bool
+}
+
+func (tstz *Timestamptz) ScanTimestamptz(v Timestamptz) error {
+ *tstz = v
+ return nil
+}
+
+func (tstz Timestamptz) TimestamptzValue() (Timestamptz, error) {
+ return tstz, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (tstz *Timestamptz) Scan(src any) error {
+ if src == nil {
+ *tstz = Timestamptz{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return (&scanPlanTextTimestamptzToTimestamptzScanner{}).Scan([]byte(src), tstz)
+ case time.Time:
+ *tstz = Timestamptz{Time: src, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (tstz Timestamptz) Value() (driver.Value, error) {
+ if !tstz.Valid {
+ return nil, nil
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier.String(), nil
+ }
+ return tstz.Time, nil
+}
+
+func (tstz Timestamptz) MarshalJSON() ([]byte, error) {
+ if !tstz.Valid {
+ return []byte("null"), nil
+ }
+
+ var s string
+
+ switch tstz.InfinityModifier {
+ case Finite:
+ s = tstz.Time.Format(time.RFC3339Nano)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return json.Marshal(s)
+}
+
+func (tstz *Timestamptz) UnmarshalJSON(b []byte) error {
+ var s *string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ if s == nil {
+ *tstz = Timestamptz{}
+ return nil
+ }
+
+ switch *s {
+ case "infinity":
+ *tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ *tstz = Timestamptz{Valid: true, InfinityModifier: NegativeInfinity}
+ default:
+ // PostgreSQL uses ISO 8601 for the to_json function and for casting from a string to timestamptz
+ tim, err := time.Parse(time.RFC3339Nano, *s)
+ if err != nil {
+ return err
+ }
+
+ *tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return nil
+}
+
+type TimestamptzCodec struct {
+ // ScanLocation is the location to return scanned timestamptz values in. This does not change the instant in time that
+ // the timestamptz represents.
+ ScanLocation *time.Location
+}
+
+func (*TimestamptzCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*TimestamptzCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (*TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(TimestamptzValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanTimestamptzCodecBinary{}
+ case TextFormatCode:
+ return encodePlanTimestamptzCodecText{}
+ }
+
+ return nil
+}
+
+type encodePlanTimestamptzCodecBinary struct{}
+
+func (encodePlanTimestamptzCodecBinary) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestamptzValuer).TimestamptzValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var microsecSinceY2K int64
+ switch ts.InfinityModifier {
+ case Finite:
+ microsecSinceUnixEpoch := ts.Time.Unix()*1000000 + int64(ts.Time.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ buf = pgio.AppendInt64(buf, microsecSinceY2K)
+
+ return buf, nil
+}
+
+type encodePlanTimestamptzCodecText struct{}
+
+func (encodePlanTimestamptzCodecText) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ ts, err := value.(TimestamptzValuer).TimestamptzValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !ts.Valid {
+ return nil, nil
+ }
+
+ var s string
+
+ switch ts.InfinityModifier {
+ case Finite:
+ t := ts.Time.UTC().Truncate(time.Microsecond)
+
+ // Year 0000 is 1 BC
+ bc := false
+ if year := t.Year(); year <= 0 {
+ year = -year + 1
+ t = time.Date(year, t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC)
+ bc = true
+ }
+
+ s = t.Format(pgTimestamptzSecondFormat)
+
+ if bc {
+ s = s + " BC"
+ }
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ buf = append(buf, s...)
+
+ return buf, nil
+}
+
+func (c *TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case TimestamptzScanner:
+ return &scanPlanBinaryTimestamptzToTimestamptzScanner{location: c.ScanLocation}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case TimestamptzScanner:
+ return &scanPlanTextTimestamptzToTimestamptzScanner{location: c.ScanLocation}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryTimestamptzToTimestamptzScanner struct{ location *time.Location }
+
+func (plan *scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestamptzScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamptz(Timestamptz{})
+ }
+
+ if len(src) != 8 {
+ return fmt.Errorf("invalid length for timestamptz: %v", len(src))
+ }
+
+ var tstz Timestamptz
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ tstz = Timestamptz{Valid: true, InfinityModifier: NegativeInfinity}
+ default:
+ tim := time.Unix(
+ microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
+ (microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
+ )
+ if plan.location != nil {
+ tim = tim.In(plan.location)
+ }
+ tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamptz(tstz)
+}
+
+type scanPlanTextTimestamptzToTimestamptzScanner struct{ location *time.Location }
+
+func (plan *scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TimestamptzScanner)
+
+ if src == nil {
+ return scanner.ScanTimestamptz(Timestamptz{})
+ }
+
+ var tstz Timestamptz
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ tstz = Timestamptz{Valid: true, InfinityModifier: Infinity}
+ case "-infinity":
+ tstz = Timestamptz{Valid: true, InfinityModifier: NegativeInfinity}
+ default:
+ bc := false
+ if strings.HasSuffix(sbuf, " BC") {
+ sbuf = sbuf[:len(sbuf)-3]
+ bc = true
+ }
+
+ var format string
+ if len(sbuf) >= 9 && (sbuf[len(sbuf)-9] == '-' || sbuf[len(sbuf)-9] == '+') {
+ format = pgTimestamptzSecondFormat
+ } else if len(sbuf) >= 6 && (sbuf[len(sbuf)-6] == '-' || sbuf[len(sbuf)-6] == '+') {
+ format = pgTimestamptzMinuteFormat
+ } else {
+ format = pgTimestamptzHourFormat
+ }
+
+ tim, err := time.Parse(format, sbuf)
+ if err != nil {
+ return err
+ }
+
+ if bc {
+ year := -tim.Year() + 1
+ tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
+ }
+
+ if plan.location != nil {
+ tim = tim.In(plan.location)
+ }
+
+ tstz = Timestamptz{Time: tim, Valid: true}
+ }
+
+ return scanner.ScanTimestamptz(tstz)
+}
+
+func (c *TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tstz Timestamptz
+ err := codecScan(c, m, oid, format, src, &tstz)
+ if err != nil {
+ return nil, err
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier.String(), nil
+ }
+
+ return tstz.Time, nil
+}
+
+func (c *TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var tstz Timestamptz
+ err := codecScan(c, m, oid, format, src, &tstz)
+ if err != nil {
+ return nil, err
+ }
+
+ if tstz.InfinityModifier != Finite {
+ return tstz.InfinityModifier, nil
+ }
+
+ return tstz.Time, nil
+}
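ScanLocation only takes effect once the codec is registered on a connection's type map. A minimal sketch, assuming an established *pgx.Conn (conn) and the standard pgtype.TimestamptzOID constant:

    // Return timestamptz values in Berlin time; as documented on ScanLocation,
    // this changes only the presentation, not the instant in time.
    loc, err := time.LoadLocation("Europe/Berlin")
    if err != nil {
        return err
    }
    conn.TypeMap().RegisterType(&pgtype.Type{
        Name:  "timestamptz",
        OID:   pgtype.TimestamptzOID,
        Codec: &pgtype.TimestamptzCodec{ScanLocation: loc},
    })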
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
new file mode 100644
index 0000000..098c516
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
@@ -0,0 +1,303 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+)
+
+type Uint32Scanner interface {
+ ScanUint32(v Uint32) error
+}
+
+type Uint32Valuer interface {
+ Uint32Value() (Uint32, error)
+}
+
+// Uint32 is the core type that is used to represent PostgreSQL types such as OID, CID, and XID.
+type Uint32 struct {
+ Uint32 uint32
+ Valid bool
+}
+
+func (n *Uint32) ScanUint32(v Uint32) error {
+ *n = v
+ return nil
+}
+
+func (n Uint32) Uint32Value() (Uint32, error) {
+ return n, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Uint32) Scan(src any) error {
+ if src == nil {
+ *dst = Uint32{}
+ return nil
+ }
+
+ var n int64
+
+ switch src := src.(type) {
+ case int64:
+ n = src
+ case string:
+ un, err := strconv.ParseUint(src, 10, 32)
+ if err != nil {
+ return err
+ }
+ n = int64(un)
+ default:
+ return fmt.Errorf("cannot scan %T", src)
+ }
+
+ if n < 0 {
+ return fmt.Errorf("%d is less than the minimum value for Uint32", n)
+ }
+ if n > math.MaxUint32 {
+ return fmt.Errorf("%d is greater than the maximum value for Uint32", n)
+ }
+
+ *dst = Uint32{Uint32: uint32(n), Valid: true}
+
+ return nil
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Uint32) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+ return int64(src.Uint32), nil
+}
+
+type Uint32Codec struct{}
+
+func (Uint32Codec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (Uint32Codec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (Uint32Codec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch format {
+ case BinaryFormatCode:
+ switch value.(type) {
+ case uint32:
+ return encodePlanUint32CodecBinaryUint32{}
+ case Uint32Valuer:
+ return encodePlanUint32CodecBinaryUint32Valuer{}
+ case Int64Valuer:
+ return encodePlanUint32CodecBinaryInt64Valuer{}
+ }
+ case TextFormatCode:
+ switch value.(type) {
+ case uint32:
+ return encodePlanUint32CodecTextUint32{}
+ case Uint32Valuer:
+ return encodePlanUint32CodecTextUint32Valuer{}
+ case Int64Valuer:
+ return encodePlanUint32CodecTextInt64Valuer{}
+ }
+ }
+
+ return nil
+}
+
+type encodePlanUint32CodecBinaryUint32 struct{}
+
+func (encodePlanUint32CodecBinaryUint32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint32)
+ return pgio.AppendUint32(buf, v), nil
+}
+
+type encodePlanUint32CodecBinaryUint32Valuer struct{}
+
+func (encodePlanUint32CodecBinaryUint32Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint32Valuer).Uint32Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return pgio.AppendUint32(buf, v.Uint32), nil
+}
+
+type encodePlanUint32CodecBinaryInt64Valuer struct{}
+
+func (encodePlanUint32CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than the minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return nil, fmt.Errorf("%d is greater than the maximum value for uint32", v.Int64)
+ }
+
+ return pgio.AppendUint32(buf, uint32(v.Int64)), nil
+}
+
+type encodePlanUint32CodecTextUint32 struct{}
+
+func (encodePlanUint32CodecTextUint32) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v := value.(uint32)
+ return append(buf, strconv.FormatUint(uint64(v), 10)...), nil
+}
+
+type encodePlanUint32CodecTextUint32Valuer struct{}
+
+func (encodePlanUint32CodecTextUint32Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Uint32Valuer).Uint32Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ return append(buf, strconv.FormatUint(uint64(v.Uint32), 10)...), nil
+}
+
+type encodePlanUint32CodecTextInt64Valuer struct{}
+
+func (encodePlanUint32CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ v, err := value.(Int64Valuer).Int64Value()
+ if err != nil {
+ return nil, err
+ }
+
+ if !v.Valid {
+ return nil, nil
+ }
+
+ if v.Int64 < 0 {
+ return nil, fmt.Errorf("%d is less than the minimum value for uint32", v.Int64)
+ }
+ if v.Int64 > math.MaxUint32 {
+ return nil, fmt.Errorf("%d is greater than the maximum value for uint32", v.Int64)
+ }
+
+ return append(buf, strconv.FormatInt(v.Int64, 10)...), nil
+}
+
+func (Uint32Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case *uint32:
+ return scanPlanBinaryUint32ToUint32{}
+ case Uint32Scanner:
+ return scanPlanBinaryUint32ToUint32Scanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case *uint32:
+ return scanPlanTextAnyToUint32{}
+ case Uint32Scanner:
+ return scanPlanTextAnyToUint32Scanner{}
+ }
+ }
+
+ return nil
+}
+
+func (c Uint32Codec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return int64(n), nil
+}
+
+func (c Uint32Codec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var n uint32
+ err := codecScan(c, m, oid, format, src, &n)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
+
+type scanPlanBinaryUint32ToUint32 struct{}
+
+func (scanPlanBinaryUint32ToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ p := (dst).(*uint32)
+ *p = binary.BigEndian.Uint32(src)
+
+ return nil
+}
+
+type scanPlanBinaryUint32ToUint32Scanner struct{}
+
+func (scanPlanBinaryUint32ToUint32Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint32Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint32(Uint32{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint32(src)
+
+ return s.ScanUint32(Uint32{Uint32: n, Valid: true})
+}
+
+type scanPlanTextAnyToUint32 struct{}
+
+func (scanPlanTextAnyToUint32) Scan(src []byte, dst any) error {
+ if src == nil {
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ p := (dst).(*uint32)
+ *p = uint32(n)
+
+ return nil
+}
+
+type scanPlanTextAnyToUint32Scanner struct{}
+
+func (scanPlanTextAnyToUint32Scanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(Uint32Scanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanUint32(Uint32{})
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ return s.ScanUint32(Uint32{Uint32: uint32(n), Valid: true})
+}
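Uint32 is what oid/xid-class columns scan into under the default type map. A minimal sketch; some_table is a placeholder, conn/ctx assumed:

    // The xmin system column is a PostgreSQL xid, which fits in uint32.
    var xmin pgtype.Uint32
    if err := conn.QueryRow(ctx, "select xmin from some_table limit 1").Scan(&xmin); err != nil {
        return err
    }
    if xmin.Valid {
        fmt.Println("xmin:", xmin.Uint32)
    }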
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
new file mode 100644
index 0000000..d57c0f2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
@@ -0,0 +1,281 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+)
+
+type UUIDScanner interface {
+ ScanUUID(v UUID) error
+}
+
+type UUIDValuer interface {
+ UUIDValue() (UUID, error)
+}
+
+type UUID struct {
+ Bytes [16]byte
+ Valid bool
+}
+
+func (b *UUID) ScanUUID(v UUID) error {
+ *b = v
+ return nil
+}
+
+func (b UUID) UUIDValue() (UUID, error) {
+ return b, nil
+}
+
+// parseUUID converts a string UUID in standard form to a byte array.
+func parseUUID(src string) (dst [16]byte, err error) {
+ switch len(src) {
+ case 36:
+ src = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]
+ case 32:
+ // dashes already stripped, assume valid
+ default:
+ // assume invalid.
+ return dst, fmt.Errorf("cannot parse UUID %v", src)
+ }
+
+ buf, err := hex.DecodeString(src)
+ if err != nil {
+ return dst, err
+ }
+
+ copy(dst[:], buf)
+ return dst, err
+}
+
+// encodeUUID converts a uuid byte array to UUID standard string form.
+func encodeUUID(src [16]byte) string {
+ var buf [36]byte
+
+ hex.Encode(buf[0:8], src[:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], src[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], src[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], src[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], src[10:])
+
+ return string(buf[:])
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *UUID) Scan(src any) error {
+ if src == nil {
+ *dst = UUID{}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ buf, err := parseUUID(src)
+ if err != nil {
+ return err
+ }
+ *dst = UUID{Bytes: buf, Valid: true}
+ return nil
+ }
+
+ return fmt.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src UUID) Value() (driver.Value, error) {
+ if !src.Valid {
+ return nil, nil
+ }
+
+ return encodeUUID(src.Bytes), nil
+}
+
+func (src UUID) MarshalJSON() ([]byte, error) {
+ if !src.Valid {
+ return []byte("null"), nil
+ }
+
+ var buff bytes.Buffer
+ buff.WriteByte('"')
+ buff.WriteString(encodeUUID(src.Bytes))
+ buff.WriteByte('"')
+ return buff.Bytes(), nil
+}
+
+func (dst *UUID) UnmarshalJSON(src []byte) error {
+ if bytes.Equal(src, []byte("null")) {
+ *dst = UUID{}
+ return nil
+ }
+ if len(src) != 38 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+ buf, err := parseUUID(string(src[1 : len(src)-1]))
+ if err != nil {
+ return err
+ }
+ *dst = UUID{Bytes: buf, Valid: true}
+ return nil
+}
+
+type UUIDCodec struct{}
+
+func (UUIDCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (UUIDCodec) PreferredFormat() int16 {
+ return BinaryFormatCode
+}
+
+func (UUIDCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ if _, ok := value.(UUIDValuer); !ok {
+ return nil
+ }
+
+ switch format {
+ case BinaryFormatCode:
+ return encodePlanUUIDCodecBinaryUUIDValuer{}
+ case TextFormatCode:
+ return encodePlanUUIDCodecTextUUIDValuer{}
+ }
+
+ return nil
+}
+
+type encodePlanUUIDCodecBinaryUUIDValuer struct{}
+
+func (encodePlanUUIDCodecBinaryUUIDValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ uuid, err := value.(UUIDValuer).UUIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !uuid.Valid {
+ return nil, nil
+ }
+
+ return append(buf, uuid.Bytes[:]...), nil
+}
+
+type encodePlanUUIDCodecTextUUIDValuer struct{}
+
+func (encodePlanUUIDCodecTextUUIDValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ uuid, err := value.(UUIDValuer).UUIDValue()
+ if err != nil {
+ return nil, err
+ }
+
+ if !uuid.Valid {
+ return nil, nil
+ }
+
+ return append(buf, encodeUUID(uuid.Bytes)...), nil
+}
+
+func (UUIDCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch format {
+ case BinaryFormatCode:
+ switch target.(type) {
+ case UUIDScanner:
+ return scanPlanBinaryUUIDToUUIDScanner{}
+ case TextScanner:
+ return scanPlanBinaryUUIDToTextScanner{}
+ }
+ case TextFormatCode:
+ switch target.(type) {
+ case UUIDScanner:
+ return scanPlanTextAnyToUUIDScanner{}
+ }
+ }
+
+ return nil
+}
+
+type scanPlanBinaryUUIDToUUIDScanner struct{}
+
+func (scanPlanBinaryUUIDToUUIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(UUIDScanner)
+
+ if src == nil {
+ return scanner.ScanUUID(UUID{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ uuid := UUID{Valid: true}
+ copy(uuid.Bytes[:], src)
+
+ return scanner.ScanUUID(uuid)
+}
+
+type scanPlanBinaryUUIDToTextScanner struct{}
+
+func (scanPlanBinaryUUIDToTextScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(TextScanner)
+
+ if src == nil {
+ return scanner.ScanText(Text{})
+ }
+
+ if len(src) != 16 {
+ return fmt.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ var buf [16]byte
+ copy(buf[:], src)
+
+ return scanner.ScanText(Text{String: encodeUUID(buf), Valid: true})
+}
+
+type scanPlanTextAnyToUUIDScanner struct{}
+
+func (scanPlanTextAnyToUUIDScanner) Scan(src []byte, dst any) error {
+ scanner := (dst).(UUIDScanner)
+
+ if src == nil {
+ return scanner.ScanUUID(UUID{})
+ }
+
+ buf, err := parseUUID(string(src))
+ if err != nil {
+ return err
+ }
+
+ return scanner.ScanUUID(UUID{Bytes: buf, Valid: true})
+}
+
+func (c UUIDCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var uuid UUID
+ err := codecScan(c, m, oid, format, src, &uuid)
+ if err != nil {
+ return nil, err
+ }
+
+ return encodeUUID(uuid.Bytes), nil
+}
+
+func (c UUIDCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var uuid UUID
+ err := codecScan(c, m, oid, format, src, &uuid)
+ if err != nil {
+ return nil, err
+ }
+ return uuid.Bytes, nil
+}
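A minimal sketch of the UUID type in application code, assuming PostgreSQL 13+ for gen_random_uuid() plus the usual conn/ctx setup:

    var id pgtype.UUID
    if err := conn.QueryRow(ctx, "select gen_random_uuid()").Scan(&id); err != nil {
        return err
    }
    // Value (like MarshalJSON) renders via encodeUUID into the standard
    // dashed form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
    v, _ := id.Value()
    fmt.Println(v)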
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go b/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go
new file mode 100644
index 0000000..5d5c681
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go
@@ -0,0 +1,52 @@
+package pgxpool
+
+import (
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+type errBatchResults struct {
+ err error
+}
+
+func (br errBatchResults) Exec() (pgconn.CommandTag, error) {
+ return pgconn.CommandTag{}, br.err
+}
+
+func (br errBatchResults) Query() (pgx.Rows, error) {
+ return errRows{err: br.err}, br.err
+}
+
+func (br errBatchResults) QueryRow() pgx.Row {
+ return errRow{err: br.err}
+}
+
+func (br errBatchResults) Close() error {
+ return br.err
+}
+
+type poolBatchResults struct {
+ br pgx.BatchResults
+ c *Conn
+}
+
+func (br *poolBatchResults) Exec() (pgconn.CommandTag, error) {
+ return br.br.Exec()
+}
+
+func (br *poolBatchResults) Query() (pgx.Rows, error) {
+ return br.br.Query()
+}
+
+func (br *poolBatchResults) QueryRow() pgx.Row {
+ return br.br.QueryRow()
+}
+
+func (br *poolBatchResults) Close() error {
+ err := br.br.Close()
+ if br.c != nil {
+ br.c.Release()
+ br.c = nil
+ }
+ return err
+}
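The key detail in poolBatchResults is that Close is what releases the underlying connection. A usage sketch against a *pgxpool.Pool; the items table is a placeholder:

    batch := &pgx.Batch{}
    batch.Queue("insert into items (n) values ($1)", 42)
    batch.Queue("select count(*) from items")

    br := pool.SendBatch(ctx, batch)
    // Close must run even when Exec or QueryRow fail; otherwise the
    // connection acquired for the batch never returns to the pool.
    defer br.Close()

    if _, err := br.Exec(); err != nil {
        return err
    }
    var count int64
    if err := br.QueryRow().Scan(&count); err != nil {
        return err
    }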
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go
new file mode 100644
index 0000000..38c90f3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/conn.go
@@ -0,0 +1,134 @@
+package pgxpool
+
+import (
+ "context"
+ "sync/atomic"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/puddle/v2"
+)
+
+// Conn is an acquired *pgx.Conn from a Pool.
+type Conn struct {
+ res *puddle.Resource[*connResource]
+ p *Pool
+}
+
+// Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called.
+// However, it is safe to call Release multiple times. Subsequent calls after the first will be ignored.
+func (c *Conn) Release() {
+ if c.res == nil {
+ return
+ }
+
+ conn := c.Conn()
+ res := c.res
+ c.res = nil
+
+ if c.p.releaseTracer != nil {
+ c.p.releaseTracer.TraceRelease(c.p, TraceReleaseData{Conn: conn})
+ }
+
+ if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
+ res.Destroy()
+ // Signal the health check to run since we just destroyed a connection
+ // and we might be below minConns now
+ c.p.triggerHealthCheck()
+ return
+ }
+
+ // If the pool is consistently being used, we might never get to check the
+ // lifetime of a connection since we only check idle connections in checkConnsHealth
+ // so we also check the lifetime here and force a health check
+ if c.p.isExpired(res) {
+ atomic.AddInt64(&c.p.lifetimeDestroyCount, 1)
+ res.Destroy()
+ // Signal the health check to run since we just destroyed a connection
+ // and we might be below minConns now
+ c.p.triggerHealthCheck()
+ return
+ }
+
+ if c.p.afterRelease == nil {
+ res.Release()
+ return
+ }
+
+ go func() {
+ if c.p.afterRelease(conn) {
+ res.Release()
+ } else {
+ res.Destroy()
+ // Signal the health check to run since we just destroyed a connection
+ // and we might be below minConns now
+ c.p.triggerHealthCheck()
+ }
+ }()
+}
+
+// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack
+// will panic if called on an already released or hijacked connection.
+func (c *Conn) Hijack() *pgx.Conn {
+ if c.res == nil {
+ panic("cannot hijack already released or hijacked connection")
+ }
+
+ conn := c.Conn()
+ res := c.res
+ c.res = nil
+
+ res.Hijack()
+
+ return conn
+}
+
+func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ return c.Conn().Exec(ctx, sql, arguments...)
+}
+
+func (c *Conn) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ return c.Conn().Query(ctx, sql, args...)
+}
+
+func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ return c.Conn().QueryRow(ctx, sql, args...)
+}
+
+func (c *Conn) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ return c.Conn().SendBatch(ctx, b)
+}
+
+func (c *Conn) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// Begin starts a transaction block from the *Conn without explicitly setting a transaction mode (see BeginTx with TxOptions if transaction mode is required).
+func (c *Conn) Begin(ctx context.Context) (pgx.Tx, error) {
+ return c.Conn().Begin(ctx)
+}
+
+// BeginTx starts a transaction block from the *Conn with txOptions determining the transaction mode.
+func (c *Conn) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+ return c.Conn().BeginTx(ctx, txOptions)
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ return c.Conn().Ping(ctx)
+}
+
+func (c *Conn) Conn() *pgx.Conn {
+ return c.connResource().conn
+}
+
+func (c *Conn) connResource() *connResource {
+ return c.res.Value()
+}
+
+func (c *Conn) getPoolRow(r pgx.Row) *poolRow {
+ return c.connResource().getPoolRow(c, r)
+}
+
+func (c *Conn) getPoolRows(r pgx.Rows) *poolRows {
+ return c.connResource().getPoolRows(c, r)
+}
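Since Release is idempotent (see above), defer is the natural pattern when holding a connection across several calls. A minimal sketch, pool setup assumed:

    c, err := pool.Acquire(ctx)
    if err != nil {
        return err
    }
    defer c.Release() // safe even if Release already ran

    if _, err := c.Exec(ctx, "listen events"); err != nil {
        return err
    }

    // AcquireFunc wraps the same acquire/release dance around a callback.
    return pool.AcquireFunc(ctx, func(c *pgxpool.Conn) error {
        return c.Ping(ctx)
    })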
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go
new file mode 100644
index 0000000..099443b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/doc.go
@@ -0,0 +1,27 @@
+// Package pgxpool is a concurrency-safe connection pool for pgx.
+/*
+pgxpool implements a nearly identical interface to pgx connections.
+
+Creating a Pool
+
+The primary way of creating a pool is with [pgxpool.New]:
+
+ pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
+
+The database connection string can be in URL or keyword/value format. PostgreSQL settings, pgx settings, and pool settings can be
+specified here. In addition, a config struct can be created by [ParseConfig].
+
+ config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
+ if err != nil {
+ // ...
+ }
+ config.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
+ // do something with every new connection
+ }
+
+ pool, err := pgxpool.NewWithConfig(context.Background(), config)
+
+A pool returns without waiting for any connections to be established. Acquire a connection immediately after creating
+the pool to check if a connection can successfully be established.
+*/
+package pgxpool
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
new file mode 100644
index 0000000..fdcba72
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
@@ -0,0 +1,717 @@
+package pgxpool
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/puddle/v2"
+)
+
+var defaultMaxConns = int32(4)
+var defaultMinConns = int32(0)
+var defaultMaxConnLifetime = time.Hour
+var defaultMaxConnIdleTime = time.Minute * 30
+var defaultHealthCheckPeriod = time.Minute
+
+type connResource struct {
+ conn *pgx.Conn
+ conns []Conn
+ poolRows []poolRow
+ poolRowss []poolRows
+ maxAgeTime time.Time
+}
+
+func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
+ if len(cr.conns) == 0 {
+ cr.conns = make([]Conn, 128)
+ }
+
+ c := &cr.conns[len(cr.conns)-1]
+ cr.conns = cr.conns[0 : len(cr.conns)-1]
+
+ c.res = res
+ c.p = p
+
+ return c
+}
+
+func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
+ if len(cr.poolRows) == 0 {
+ cr.poolRows = make([]poolRow, 128)
+ }
+
+ pr := &cr.poolRows[len(cr.poolRows)-1]
+ cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
+
+ pr.c = c
+ pr.r = r
+
+ return pr
+}
+
+func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
+ if len(cr.poolRowss) == 0 {
+ cr.poolRowss = make([]poolRows, 128)
+ }
+
+ pr := &cr.poolRowss[len(cr.poolRowss)-1]
+ cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
+
+ pr.c = c
+ pr.r = r
+
+ return pr
+}
+
+// Pool allows for connection reuse.
+type Pool struct {
+ // 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
+ // architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288.
+ newConnsCount int64
+ lifetimeDestroyCount int64
+ idleDestroyCount int64
+
+ p *puddle.Pool[*connResource]
+ config *Config
+ beforeConnect func(context.Context, *pgx.ConnConfig) error
+ afterConnect func(context.Context, *pgx.Conn) error
+ beforeAcquire func(context.Context, *pgx.Conn) bool
+ afterRelease func(*pgx.Conn) bool
+ beforeClose func(*pgx.Conn)
+ minConns int32
+ maxConns int32
+ maxConnLifetime time.Duration
+ maxConnLifetimeJitter time.Duration
+ maxConnIdleTime time.Duration
+ healthCheckPeriod time.Duration
+
+ healthCheckChan chan struct{}
+
+ acquireTracer AcquireTracer
+ releaseTracer ReleaseTracer
+
+ closeOnce sync.Once
+ closeChan chan struct{}
+}
+
+// Config is the configuration struct for creating a pool. It must be created by [ParseConfig] and then it can be
+// modified.
+type Config struct {
+ ConnConfig *pgx.ConnConfig
+
+ // BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
+ // will not impact any existing open connections.
+ BeforeConnect func(context.Context, *pgx.ConnConfig) error
+
+ // AfterConnect is called after a connection is established, but before it is added to the pool.
+ AfterConnect func(context.Context, *pgx.Conn) error
+
+ // BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
+ // acquisition or false to indicate that the connection should be destroyed and a different connection should be
+ // acquired.
+ BeforeAcquire func(context.Context, *pgx.Conn) bool
+
+ // AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
+ // return the connection to the pool or false to destroy the connection.
+ AfterRelease func(*pgx.Conn) bool
+
+ // BeforeClose is called right before a connection is closed and removed from the pool.
+ BeforeClose func(*pgx.Conn)
+
+ // MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
+ MaxConnLifetime time.Duration
+
+ // MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
+ // This helps prevent all connections from being closed at the exact same time, starving the pool.
+ MaxConnLifetimeJitter time.Duration
+
+ // MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
+ MaxConnIdleTime time.Duration
+
+ // MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
+ MaxConns int32
+
+ // MinConns is the minimum size of the pool. After a connection closes, the pool might dip below MinConns. A low
+ // number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
+ // to create new connections.
+ MinConns int32
+
+ // HealthCheckPeriod is the duration between checks of the health of idle connections.
+ HealthCheckPeriod time.Duration
+
+ createdByParseConfig bool // Used to enforce created by ParseConfig rule.
+}
+
+// Copy returns a deep copy of the config that is safe to use and modify.
+// The only exception is the tls.Config:
+// according to the tls.Config docs it must not be modified after creation.
+func (c *Config) Copy() *Config {
+ newConfig := new(Config)
+ *newConfig = *c
+ newConfig.ConnConfig = c.ConnConfig.Copy()
+ return newConfig
+}
+
+// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
+func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
+
+// New creates a new Pool. See [ParseConfig] for information on connString format.
+func New(ctx context.Context, connString string) (*Pool, error) {
+ config, err := ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithConfig(ctx, config)
+}
+
+// NewWithConfig creates a new Pool. config must have been created by [ParseConfig].
+func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
+ // Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
+ // zero values.
+ if !config.createdByParseConfig {
+ panic("config must be created by ParseConfig")
+ }
+
+ p := &Pool{
+ config: config,
+ beforeConnect: config.BeforeConnect,
+ afterConnect: config.AfterConnect,
+ beforeAcquire: config.BeforeAcquire,
+ afterRelease: config.AfterRelease,
+ beforeClose: config.BeforeClose,
+ minConns: config.MinConns,
+ maxConns: config.MaxConns,
+ maxConnLifetime: config.MaxConnLifetime,
+ maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
+ maxConnIdleTime: config.MaxConnIdleTime,
+ healthCheckPeriod: config.HealthCheckPeriod,
+ healthCheckChan: make(chan struct{}, 1),
+ closeChan: make(chan struct{}),
+ }
+
+ if t, ok := config.ConnConfig.Tracer.(AcquireTracer); ok {
+ p.acquireTracer = t
+ }
+
+ if t, ok := config.ConnConfig.Tracer.(ReleaseTracer); ok {
+ p.releaseTracer = t
+ }
+
+ var err error
+ p.p, err = puddle.NewPool(
+ &puddle.Config[*connResource]{
+ Constructor: func(ctx context.Context) (*connResource, error) {
+ atomic.AddInt64(&p.newConnsCount, 1)
+ connConfig := p.config.ConnConfig.Copy()
+
+ // Connection establishment will continue in the background even if Acquire is canceled. Ensure that a connect won't hang forever.
+ if connConfig.ConnectTimeout <= 0 {
+ connConfig.ConnectTimeout = 2 * time.Minute
+ }
+
+ if p.beforeConnect != nil {
+ if err := p.beforeConnect(ctx, connConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ conn, err := pgx.ConnectConfig(ctx, connConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if p.afterConnect != nil {
+ err = p.afterConnect(ctx, conn)
+ if err != nil {
+ conn.Close(ctx)
+ return nil, err
+ }
+ }
+
+ jitterSecs := rand.Float64() * config.MaxConnLifetimeJitter.Seconds()
+ maxAgeTime := time.Now().Add(config.MaxConnLifetime).Add(time.Duration(jitterSecs) * time.Second)
+
+ cr := &connResource{
+ conn: conn,
+ conns: make([]Conn, 64),
+ poolRows: make([]poolRow, 64),
+ poolRowss: make([]poolRows, 64),
+ maxAgeTime: maxAgeTime,
+ }
+
+ return cr, nil
+ },
+ Destructor: func(value *connResource) {
+ ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ conn := value.conn
+ if p.beforeClose != nil {
+ p.beforeClose(conn)
+ }
+ conn.Close(ctx)
+ select {
+ case <-conn.PgConn().CleanupDone():
+ case <-ctx.Done():
+ }
+ cancel()
+ },
+ MaxSize: config.MaxConns,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ p.createIdleResources(ctx, int(p.minConns))
+ p.backgroundHealthCheck()
+ }()
+
+ return p, nil
+}
+
+// ParseConfig builds a Config from connString. It parses connString with the same behavior as [pgx.ParseConfig] with the
+// addition of the following variables:
+//
+// - pool_max_conns: integer greater than 0
+// - pool_min_conns: integer 0 or greater
+// - pool_max_conn_lifetime: duration string
+// - pool_max_conn_idle_time: duration string
+// - pool_health_check_period: duration string
+// - pool_max_conn_lifetime_jitter: duration string
+//
+// See Config for definitions of these arguments.
+//
+// # Example Keyword/Value
+// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
+//
+// # Example URL
+// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10
+func ParseConfig(connString string) (*Config, error) {
+ connConfig, err := pgx.ParseConfig(connString)
+ if err != nil {
+ return nil, err
+ }
+
+ config := &Config{
+ ConnConfig: connConfig,
+ createdByParseConfig: true,
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conns"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conns")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
+ }
+ if n < 1 {
+ return nil, fmt.Errorf("pool_max_conns too small: %d", n)
+ }
+ config.MaxConns = int32(n)
+ } else {
+ config.MaxConns = defaultMaxConns
+ if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
+ config.MaxConns = numCPU
+ }
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_min_conns"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_min_conns")
+ n, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
+ }
+ config.MinConns = int32(n)
+ } else {
+ config.MinConns = defaultMinConns
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_lifetime: %w", err)
+ }
+ config.MaxConnLifetime = d
+ } else {
+ config.MaxConnLifetime = defaultMaxConnLifetime
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_idle_time"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_idle_time")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_idle_time: %w", err)
+ }
+ config.MaxConnIdleTime = d
+ } else {
+ config.MaxConnIdleTime = defaultMaxConnIdleTime
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_health_check_period"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_health_check_period")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_health_check_period: %w", err)
+ }
+ config.HealthCheckPeriod = d
+ } else {
+ config.HealthCheckPeriod = defaultHealthCheckPeriod
+ }
+
+ if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime_jitter"]; ok {
+ delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime_jitter")
+ d, err := time.ParseDuration(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pool_max_conn_lifetime_jitter: %w", err)
+ }
+ config.MaxConnLifetimeJitter = d
+ }
+
+ return config, nil
+}
+
+// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
+// to pool and closed.
+func (p *Pool) Close() {
+ p.closeOnce.Do(func() {
+ close(p.closeChan)
+ p.p.Close()
+ })
+}
+
+func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
+ return time.Now().After(res.Value().maxAgeTime)
+}
+
+func (p *Pool) triggerHealthCheck() {
+ go func() {
+ // Destroy is asynchronous, so we give it time to actually remove itself from
+ // the pool; otherwise we might try to check the pool size too soon
+ time.Sleep(500 * time.Millisecond)
+ select {
+ case p.healthCheckChan <- struct{}{}:
+ default:
+ }
+ }()
+}
+
+func (p *Pool) backgroundHealthCheck() {
+ ticker := time.NewTicker(p.healthCheckPeriod)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-p.closeChan:
+ return
+ case <-p.healthCheckChan:
+ p.checkHealth()
+ case <-ticker.C:
+ p.checkHealth()
+ }
+ }
+}
+
+func (p *Pool) checkHealth() {
+ for {
+ // If checkMinConns failed we don't destroy any connections since we couldn't
+ // even get to minConns
+ if err := p.checkMinConns(); err != nil {
+ // Should we log this error somewhere?
+ break
+ }
+ if !p.checkConnsHealth() {
+ // Since we didn't destroy any connections we can stop looping
+ break
+ }
+ // Technically Destroy is asynchronous, but 500ms should be enough for the
+ // connection to be removed from the underlying pool
+ select {
+ case <-p.closeChan:
+ return
+ case <-time.After(500 * time.Millisecond):
+ }
+ }
+}
+
+// checkConnsHealth checks all idle connections, destroys any that have been
+// idle too long or have exceeded their maximum lifetime, and returns true if
+// any were destroyed.
+func (p *Pool) checkConnsHealth() bool {
+ var destroyed bool
+ totalConns := p.Stat().TotalConns()
+ resources := p.p.AcquireAllIdle()
+ for _, res := range resources {
+ // We're okay going under minConns if the lifetime is up
+ if p.isExpired(res) && totalConns >= p.minConns {
+ atomic.AddInt64(&p.lifetimeDestroyCount, 1)
+ res.Destroy()
+ destroyed = true
+ // Since Destroy is async we manually decrement totalConns.
+ totalConns--
+ } else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
+ atomic.AddInt64(&p.idleDestroyCount, 1)
+ res.Destroy()
+ destroyed = true
+ // Since Destroy is async we manually decrement totalConns.
+ totalConns--
+ } else {
+ res.ReleaseUnused()
+ }
+ }
+ return destroyed
+}
+
+func (p *Pool) checkMinConns() error {
+ // TotalConns can include ones that are being destroyed but we should have
+ // sleep(500ms) around all of the destroys to help prevent that from throwing
+ // off this check
+ toCreate := p.minConns - p.Stat().TotalConns()
+ if toCreate > 0 {
+ return p.createIdleResources(context.Background(), int(toCreate))
+ }
+ return nil
+}
+
+func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+
+ errs := make(chan error, targetResources)
+
+ for i := 0; i < targetResources; i++ {
+ go func() {
+ err := p.p.CreateResource(ctx)
+ // Ignore ErrNotAvailable since it means that the pool has become full since we started creating resources.
+ if err == puddle.ErrNotAvailable {
+ err = nil
+ }
+ errs <- err
+ }()
+ }
+
+ var firstError error
+ for i := 0; i < targetResources; i++ {
+ err := <-errs
+ if err != nil && firstError == nil {
+ cancel()
+ firstError = err
+ }
+ }
+
+ return firstError
+}
+
+// Acquire returns a connection (*Conn) from the Pool.
+func (p *Pool) Acquire(ctx context.Context) (c *Conn, err error) {
+ if p.acquireTracer != nil {
+ ctx = p.acquireTracer.TraceAcquireStart(ctx, p, TraceAcquireStartData{})
+ defer func() {
+ var conn *pgx.Conn
+ if c != nil {
+ conn = c.Conn()
+ }
+ p.acquireTracer.TraceAcquireEnd(ctx, p, TraceAcquireEndData{Conn: conn, Err: err})
+ }()
+ }
+
+ for {
+ res, err := p.p.Acquire(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ cr := res.Value()
+
+ if res.IdleDuration() > time.Second {
+ err := cr.conn.Ping(ctx)
+ if err != nil {
+ res.Destroy()
+ continue
+ }
+ }
+
+ if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
+ return cr.getConn(p, res), nil
+ }
+
+ res.Destroy()
+ }
+}
+
+// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
+// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
+// automatically released after the call of f.
+func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
+ conn, err := p.Acquire(ctx)
+ if err != nil {
+ return err
+ }
+ defer conn.Release()
+
+ return f(conn)
+}
+
+// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
+// keep-alive functionality. It does not update pool statistics.
+func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
+ resources := p.p.AcquireAllIdle()
+ conns := make([]*Conn, 0, len(resources))
+ for _, res := range resources {
+ cr := res.Value()
+ if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
+ conns = append(conns, cr.getConn(p, res))
+ } else {
+ res.Destroy()
+ }
+ }
+
+ return conns
+}
+
+// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
+// disrupt all connections (such as a network interruption or a server state change).
+//
+// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
+// to the pool.
+func (p *Pool) Reset() {
+ p.p.Reset()
+}
+
+// Config returns a copy of config that was used to initialize this pool.
+func (p *Pool) Config() *Config { return p.config.Copy() }
+
+// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
+func (p *Pool) Stat() *Stat {
+ return &Stat{
+ s: p.p.Stat(),
+ newConnsCount: atomic.LoadInt64(&p.newConnsCount),
+ lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
+ idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount),
+ }
+}
+
+// Exec acquires a connection from the Pool and executes the given SQL.
+// SQL can be either a prepared statement name or an SQL string.
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+// The acquired connection is returned to the pool when the Exec function returns.
+func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ defer c.Release()
+
+ return c.Exec(ctx, sql, arguments...)
+}
+
+// Query acquires a connection and executes a query that returns pgx.Rows.
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
+//
+// If there is an error, the returned pgx.Rows will be returned in an error state.
+// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
+//
+// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
+func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errRows{err: err}, err
+ }
+
+ rows, err := c.Query(ctx, sql, args...)
+ if err != nil {
+ c.Release()
+ return errRows{err: err}, err
+ }
+
+ return c.getPoolRows(rows), nil
+}
+
+// QueryRow acquires a connection and executes a query that is expected
+// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
+// Scan method is called. If the query selects no rows, pgx.Row's Scan will
+// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
+// and discards the rest. The acquired connection is returned to the Pool when
+// pgx.Row's Scan method is called.
+//
+// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
+//
+// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
+// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
+// needed. See the documentation for those types for details.
+func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errRow{err: err}
+ }
+
+ row := c.QueryRow(ctx, sql, args...)
+ return c.getPoolRow(row)
+}
+
+func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return errBatchResults{err: err}
+ }
+
+ br := c.SendBatch(ctx, b)
+ return &poolBatchResults{br: br, c: c}
+}
+
+// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command; i.e., there is no
+// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if transaction mode is required).
+// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
+// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
+func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
+ return p.BeginTx(ctx, pgx.TxOptions{})
+}
+
+// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
+// Unlike database/sql, the context only affects the begin command; i.e., there is no auto-rollback on context cancellation.
+// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
+// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
+func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ t, err := c.BeginTx(ctx, txOptions)
+ if err != nil {
+ c.Release()
+ return nil, err
+ }
+
+ return &Tx{t: t, c: c}, nil
+}
+
+func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer c.Release()
+
+ return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// Ping acquires a connection from the Pool and executes an empty sql statement against it.
+// If the sql returns without error, the database Ping is considered successful; otherwise the error is returned.
+func (p *Pool) Ping(ctx context.Context) error {
+ c, err := p.Acquire(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.Release()
+ return c.Ping(ctx)
+}
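Tying the pool options and query helpers together, a minimal end-to-end sketch; the URL reuses the placeholder host from the ParseConfig example and items is a placeholder table:

    pool, err := pgxpool.New(ctx,
        "postgres://jack:secret@pg.example.com:5432/mydb?pool_max_conns=10&pool_min_conns=2")
    if err != nil {
        return err
    }
    defer pool.Close()

    rows, err := pool.Query(ctx, "select id from items")
    if err != nil {
        return err
    }
    defer rows.Close() // also releases the acquired connection
    for rows.Next() {
        var id int64
        if err := rows.Scan(&id); err != nil {
            return err
        }
    }
    return rows.Err()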
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go b/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
new file mode 100644
index 0000000..f834b7e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
@@ -0,0 +1,116 @@
+package pgxpool
+
+import (
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+type errRows struct {
+ err error
+}
+
+func (errRows) Close() {}
+func (e errRows) Err() error { return e.err }
+func (errRows) CommandTag() pgconn.CommandTag { return pgconn.CommandTag{} }
+func (errRows) FieldDescriptions() []pgconn.FieldDescription { return nil }
+func (errRows) Next() bool { return false }
+func (e errRows) Scan(dest ...any) error { return e.err }
+func (e errRows) Values() ([]any, error) { return nil, e.err }
+func (e errRows) RawValues() [][]byte { return nil }
+func (e errRows) Conn() *pgx.Conn { return nil }
+
+type errRow struct {
+ err error
+}
+
+func (e errRow) Scan(dest ...any) error { return e.err }
+
+type poolRows struct {
+ r pgx.Rows
+ c *Conn
+ err error
+}
+
+func (rows *poolRows) Close() {
+ rows.r.Close()
+ if rows.c != nil {
+ rows.c.Release()
+ rows.c = nil
+ }
+}
+
+func (rows *poolRows) Err() error {
+ if rows.err != nil {
+ return rows.err
+ }
+ return rows.r.Err()
+}
+
+func (rows *poolRows) CommandTag() pgconn.CommandTag {
+ return rows.r.CommandTag()
+}
+
+func (rows *poolRows) FieldDescriptions() []pgconn.FieldDescription {
+ return rows.r.FieldDescriptions()
+}
+
+func (rows *poolRows) Next() bool {
+ if rows.err != nil {
+ return false
+ }
+
+ n := rows.r.Next()
+ if !n {
+ rows.Close()
+ }
+ return n
+}
+
+func (rows *poolRows) Scan(dest ...any) error {
+ err := rows.r.Scan(dest...)
+ if err != nil {
+ rows.Close()
+ }
+ return err
+}
+
+func (rows *poolRows) Values() ([]any, error) {
+ values, err := rows.r.Values()
+ if err != nil {
+ rows.Close()
+ }
+ return values, err
+}
+
+func (rows *poolRows) RawValues() [][]byte {
+ return rows.r.RawValues()
+}
+
+func (rows *poolRows) Conn() *pgx.Conn {
+ return rows.r.Conn()
+}
+
+type poolRow struct {
+ r pgx.Row
+ c *Conn
+ err error
+}
+
+func (row *poolRow) Scan(dest ...any) error {
+ if row.err != nil {
+ return row.err
+ }
+
+ panicked := true
+ defer func() {
+ if panicked && row.c != nil {
+ row.c.Release()
+ }
+ }()
+ err := row.r.Scan(dest...)
+ panicked = false
+ if row.c != nil {
+ row.c.Release()
+ }
+ return err
+}
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go b/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
new file mode 100644
index 0000000..cfa0c4c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
@@ -0,0 +1,84 @@
+package pgxpool
+
+import (
+ "time"
+
+ "github.com/jackc/puddle/v2"
+)
+
+// Stat is a snapshot of Pool statistics.
+type Stat struct {
+ s *puddle.Stat
+ newConnsCount int64
+ lifetimeDestroyCount int64
+ idleDestroyCount int64
+}
+
+// AcquireCount returns the cumulative count of successful acquires from the pool.
+func (s *Stat) AcquireCount() int64 {
+ return s.s.AcquireCount()
+}
+
+// AcquireDuration returns the total duration of all successful acquires from
+// the pool.
+func (s *Stat) AcquireDuration() time.Duration {
+ return s.s.AcquireDuration()
+}
+
+// AcquiredConns returns the number of currently acquired connections in the pool.
+func (s *Stat) AcquiredConns() int32 {
+ return s.s.AcquiredResources()
+}
+
+// CanceledAcquireCount returns the cumulative count of acquires from the pool
+// that were canceled by a context.
+func (s *Stat) CanceledAcquireCount() int64 {
+ return s.s.CanceledAcquireCount()
+}
+
+// ConstructingConns returns the number of conns with construction in progress in
+// the pool.
+func (s *Stat) ConstructingConns() int32 {
+ return s.s.ConstructingResources()
+}
+
+// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
+// that waited for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireCount() int64 {
+ return s.s.EmptyAcquireCount()
+}
+
+// IdleConns returns the number of currently idle conns in the pool.
+func (s *Stat) IdleConns() int32 {
+ return s.s.IdleResources()
+}
+
+// MaxConns returns the maximum size of the pool.
+func (s *Stat) MaxConns() int32 {
+ return s.s.MaxResources()
+}
+
+// TotalConns returns the total number of resources currently in the pool.
+// The value is the sum of ConstructingConns, AcquiredConns, and
+// IdleConns.
+func (s *Stat) TotalConns() int32 {
+ return s.s.TotalResources()
+}
+
+// NewConnsCount returns the cumulative count of new connections opened.
+func (s *Stat) NewConnsCount() int64 {
+ return s.newConnsCount
+}
+
+// MaxLifetimeDestroyCount returns the cumulative count of connections destroyed
+// because they exceeded MaxConnLifetime.
+func (s *Stat) MaxLifetimeDestroyCount() int64 {
+ return s.lifetimeDestroyCount
+}
+
+// MaxIdleDestroyCount returns the cumulative count of connections destroyed because
+// they exceeded MaxConnIdleTime.
+func (s *Stat) MaxIdleDestroyCount() int64 {
+ return s.idleDestroyCount
+}
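+
+// A hedged monitoring sketch, assuming pool is a *Pool whose Stat method
+// returns this snapshot type; the counters can then be polled periodically:
+//
+//	s := pool.Stat()
+//	log.Printf("acquired=%d idle=%d constructing=%d total=%d max=%d",
+//		s.AcquiredConns(), s.IdleConns(), s.ConstructingConns(), s.TotalConns(), s.MaxConns())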
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go
new file mode 100644
index 0000000..78b9d15
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go
@@ -0,0 +1,33 @@
+package pgxpool
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// AcquireTracer traces Acquire.
+type AcquireTracer interface {
+ // TraceAcquireStart is called at the beginning of Acquire.
+ // The returned context is used for the rest of the call and will be passed to the TraceAcquireEnd.
+ TraceAcquireStart(ctx context.Context, pool *Pool, data TraceAcquireStartData) context.Context
+ // TraceAcquireEnd is called when a connection has been acquired.
+ TraceAcquireEnd(ctx context.Context, pool *Pool, data TraceAcquireEndData)
+}
+
+type TraceAcquireStartData struct{}
+
+type TraceAcquireEndData struct {
+ Conn *pgx.Conn
+ Err error
+}
+
+// ReleaseTracer traces Release.
+type ReleaseTracer interface {
+ // TraceRelease is called at the beginning of Release.
+ TraceRelease(pool *Pool, data TraceReleaseData)
+}
+
+type TraceReleaseData struct {
+ Conn *pgx.Conn
+}
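+
+// A minimal AcquireTracer sketch (timingTracer and startKey are hypothetical
+// caller-side types; wiring the tracer into a pool configuration is not shown
+// here):
+//
+//	type timingTracer struct{}
+//	type startKey struct{}
+//
+//	func (timingTracer) TraceAcquireStart(ctx context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
+//		return context.WithValue(ctx, startKey{}, time.Now())
+//	}
+//
+//	func (timingTracer) TraceAcquireEnd(ctx context.Context, _ *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
+//		if t, ok := ctx.Value(startKey{}).(time.Time); ok {
+//			_ = time.Since(t) // record acquire latency and data.Err here
+//		}
+//	}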
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
new file mode 100644
index 0000000..74df859
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
@@ -0,0 +1,82 @@
+package pgxpool
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// Tx represents a database transaction acquired from a Pool.
+type Tx struct {
+ t pgx.Tx
+ c *Conn
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
+ return tx.t.Begin(ctx)
+}
+
+// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
+// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
+// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
+func (tx *Tx) Commit(ctx context.Context) error {
+ err := tx.t.Commit(ctx)
+ if tx.c != nil {
+ tx.c.Release()
+ tx.c = nil
+ }
+ return err
+}
+
+// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
+// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
+// tx.Commit() will be called first in a non-error condition.
+func (tx *Tx) Rollback(ctx context.Context) error {
+ err := tx.t.Rollback(ctx)
+ if tx.c != nil {
+ tx.c.Release()
+ tx.c = nil
+ }
+ return err
+}
+
+func (tx *Tx) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
+ return tx.t.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+func (tx *Tx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
+ return tx.t.SendBatch(ctx, b)
+}
+
+func (tx *Tx) LargeObjects() pgx.LargeObjects {
+ return tx.t.LargeObjects()
+}
+
+// Prepare creates a prepared statement with name and sql. If the name is empty,
+// an anonymous prepared statement will be used. sql can contain placeholders
+// for bound parameters. These placeholders are referenced positionally as $1, $2, etc.
+//
+// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
+// name and sql arguments. This allows a code path to Prepare and Query/Exec without
+// needing to first check whether the statement has already been prepared.
+func (tx *Tx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ return tx.t.Prepare(ctx, name, sql)
+}
+
+func (tx *Tx) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+ return tx.t.Exec(ctx, sql, arguments...)
+}
+
+func (tx *Tx) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
+ return tx.t.Query(ctx, sql, args...)
+}
+
+func (tx *Tx) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
+ return tx.t.QueryRow(ctx, sql, args...)
+}
+
+func (tx *Tx) Conn() *pgx.Conn {
+ return tx.t.Conn()
+}
diff --git a/vendor/github.com/jackc/pgx/v5/rows.go b/vendor/github.com/jackc/pgx/v5/rows.go
new file mode 100644
index 0000000..d4f7a90
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/rows.go
@@ -0,0 +1,851 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// Rows is the result set returned from *Conn.Query. Rows must be closed before
+// the *Conn can be used again. Rows are closed by explicitly calling Close(),
+// calling Next() until it returns false, or when a fatal error occurs.
+//
+// Once a Rows is closed the only methods that may be called are Close(), Err(),
+// and CommandTag().
+//
+// Rows is an interface instead of a struct to allow tests to mock Query. However,
+// adding a method to an interface is technically a breaking change. Because of this
+// the Rows interface is partially excluded from semantic version requirements.
+// Methods will not be removed or changed, but new methods may be added.
+type Rows interface {
+ // Close closes the rows, making the connection ready for use again. It is safe
+ // to call Close after rows is already closed.
+ Close()
+
+ // Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
+ // calling Close or by Next returning false). If it is called early it may return nil even if there was an error
+ // executing the query.
+ Err() error
+
+ // CommandTag returns the command tag from this query. It is only available after Rows is closed.
+ CommandTag() pgconn.CommandTag
+
+ // FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
+ // when there was an error executing the query.
+ FieldDescriptions() []pgconn.FieldDescription
+
+ // Next prepares the next row for reading. It returns true if there is another
+ // row and false if no more rows are available or a fatal error has occurred.
+ // It automatically closes rows when all rows are read.
+ //
+ // Callers should check rows.Err() after rows.Next() returns false to detect
+ // whether result-set reading ended prematurely due to an error. See
+ // Conn.Query for details.
+ //
+ // For simpler error handling, consider using the higher-level pgx v5
+ // CollectRows() and ForEachRow() helpers instead.
+ Next() bool
+
+ // Scan reads the values from the current row into dest values positionally.
+ // dest can include pointers to core types, values implementing the Scanner
+ // interface, and nil. nil will skip the value entirely. It is an error to
+ // call Scan without first calling Next() and checking that it returned true.
+ Scan(dest ...any) error
+
+ // Values returns the decoded row values. As with Scan(), it is an error to
+ // call Values without first calling Next() and checking that it returned
+ // true.
+ Values() ([]any, error)
+
+ // RawValues returns the unparsed bytes of the row values. The returned data is only valid until the next Next
+ // call or the Rows is closed.
+ RawValues() [][]byte
+
+	// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a
+	// *Conn (e.g. if it was created by RowsFromResultReader).
+ Conn() *Conn
+}
+
+// Row is a convenience wrapper over Rows that is returned by QueryRow.
+//
+// Row is an interface instead of a struct to allow tests to mock QueryRow. However,
+// adding a method to an interface is technically a breaking change. Because of this
+// the Row interface is partially excluded from semantic version requirements.
+// Methods will not be removed or changed, but new methods may be added.
+type Row interface {
+	// Scan works the same as Rows.Scan, with the following exceptions. If no
+	// rows were found it returns ErrNoRows. If multiple rows are returned it
+	// ignores all but the first.
+ Scan(dest ...any) error
+}
+
+// RowScanner scans an entire row at a time into the RowScanner.
+type RowScanner interface {
+ // ScanRows scans the row.
+	// ScanRow scans the row.
+}
+
+// connRow implements the Row interface for Conn.QueryRow.
+type connRow baseRows
+
+func (r *connRow) Scan(dest ...any) (err error) {
+ rows := (*baseRows)(r)
+
+ if rows.Err() != nil {
+ return rows.Err()
+ }
+
+ for _, d := range dest {
+ if _, ok := d.(*pgtype.DriverBytes); ok {
+ rows.Close()
+ return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
+ }
+ }
+
+ if !rows.Next() {
+ if rows.Err() == nil {
+ return ErrNoRows
+ }
+ return rows.Err()
+ }
+
+ rows.Scan(dest...)
+ rows.Close()
+ return rows.Err()
+}
+
+// baseRows implements the Rows interface for Conn.Query.
+type baseRows struct {
+ typeMap *pgtype.Map
+ resultReader *pgconn.ResultReader
+
+ values [][]byte
+
+ commandTag pgconn.CommandTag
+ err error
+ closed bool
+
+ scanPlans []pgtype.ScanPlan
+ scanTypes []reflect.Type
+
+ conn *Conn
+ multiResultReader *pgconn.MultiResultReader
+
+ queryTracer QueryTracer
+ batchTracer BatchTracer
+ ctx context.Context
+ startTime time.Time
+ sql string
+ args []any
+ rowCount int
+}
+
+func (rows *baseRows) FieldDescriptions() []pgconn.FieldDescription {
+ return rows.resultReader.FieldDescriptions()
+}
+
+func (rows *baseRows) Close() {
+ if rows.closed {
+ return
+ }
+
+ rows.closed = true
+
+ if rows.resultReader != nil {
+ var closeErr error
+ rows.commandTag, closeErr = rows.resultReader.Close()
+ if rows.err == nil {
+ rows.err = closeErr
+ }
+ }
+
+ if rows.multiResultReader != nil {
+ closeErr := rows.multiResultReader.Close()
+ if rows.err == nil {
+ rows.err = closeErr
+ }
+ }
+
+ if rows.err != nil && rows.conn != nil && rows.sql != "" {
+ if sc := rows.conn.statementCache; sc != nil {
+ sc.Invalidate(rows.sql)
+ }
+
+ if sc := rows.conn.descriptionCache; sc != nil {
+ sc.Invalidate(rows.sql)
+ }
+ }
+
+ if rows.batchTracer != nil {
+ rows.batchTracer.TraceBatchQuery(rows.ctx, rows.conn, TraceBatchQueryData{SQL: rows.sql, Args: rows.args, CommandTag: rows.commandTag, Err: rows.err})
+ } else if rows.queryTracer != nil {
+ rows.queryTracer.TraceQueryEnd(rows.ctx, rows.conn, TraceQueryEndData{rows.commandTag, rows.err})
+ }
+}
+
+func (rows *baseRows) CommandTag() pgconn.CommandTag {
+ return rows.commandTag
+}
+
+func (rows *baseRows) Err() error {
+ return rows.err
+}
+
+// fatal signals an error occurred after the query was sent to the server. It
+// closes the rows automatically.
+func (rows *baseRows) fatal(err error) {
+ if rows.err != nil {
+ return
+ }
+
+ rows.err = err
+ rows.Close()
+}
+
+func (rows *baseRows) Next() bool {
+ if rows.closed {
+ return false
+ }
+
+ if rows.resultReader.NextRow() {
+ rows.rowCount++
+ rows.values = rows.resultReader.Values()
+ return true
+ } else {
+ rows.Close()
+ return false
+ }
+}
+
+func (rows *baseRows) Scan(dest ...any) error {
+ m := rows.typeMap
+ fieldDescriptions := rows.FieldDescriptions()
+ values := rows.values
+
+ if len(fieldDescriptions) != len(values) {
+ err := fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
+ rows.fatal(err)
+ return err
+ }
+
+ if len(dest) == 1 {
+ if rc, ok := dest[0].(RowScanner); ok {
+ err := rc.ScanRow(rows)
+ if err != nil {
+ rows.fatal(err)
+ }
+ return err
+ }
+ }
+
+ if len(fieldDescriptions) != len(dest) {
+ err := fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
+ rows.fatal(err)
+ return err
+ }
+
+ if rows.scanPlans == nil {
+ rows.scanPlans = make([]pgtype.ScanPlan, len(values))
+ rows.scanTypes = make([]reflect.Type, len(values))
+ for i := range dest {
+ rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
+ rows.scanTypes[i] = reflect.TypeOf(dest[i])
+ }
+ }
+
+ for i, dst := range dest {
+ if dst == nil {
+ continue
+ }
+
+ if rows.scanTypes[i] != reflect.TypeOf(dst) {
+ rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
+ rows.scanTypes[i] = reflect.TypeOf(dest[i])
+ }
+
+ err := rows.scanPlans[i].Scan(values[i], dst)
+ if err != nil {
+ err = ScanArgError{ColumnIndex: i, Err: err}
+ rows.fatal(err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (rows *baseRows) Values() ([]any, error) {
+ if rows.closed {
+ return nil, errors.New("rows is closed")
+ }
+
+ values := make([]any, 0, len(rows.FieldDescriptions()))
+
+ for i := range rows.FieldDescriptions() {
+ buf := rows.values[i]
+ fd := &rows.FieldDescriptions()[i]
+
+ if buf == nil {
+ values = append(values, nil)
+ continue
+ }
+
+ if dt, ok := rows.typeMap.TypeForOID(fd.DataTypeOID); ok {
+ value, err := dt.Codec.DecodeValue(rows.typeMap, fd.DataTypeOID, fd.Format, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, value)
+ } else {
+ switch fd.Format {
+ case TextFormatCode:
+ values = append(values, string(buf))
+ case BinaryFormatCode:
+ newBuf := make([]byte, len(buf))
+ copy(newBuf, buf)
+ values = append(values, newBuf)
+ default:
+ rows.fatal(errors.New("unknown format code"))
+ }
+ }
+
+ if rows.Err() != nil {
+ return nil, rows.Err()
+ }
+ }
+
+ return values, rows.Err()
+}
+
+func (rows *baseRows) RawValues() [][]byte {
+ return rows.values
+}
+
+func (rows *baseRows) Conn() *Conn {
+ return rows.conn
+}
+
+type ScanArgError struct {
+ ColumnIndex int
+ Err error
+}
+
+func (e ScanArgError) Error() string {
+ return fmt.Sprintf("can't scan into dest[%d]: %v", e.ColumnIndex, e.Err)
+}
+
+func (e ScanArgError) Unwrap() error {
+ return e.Err
+}
+
+// ScanRow decodes raw row data into dest. It can be used to scan rows read from the lower level pgconn interface.
+//
+// typeMap - OID to Go type mapping.
+// fieldDescriptions - OID and format of values.
+// values - the raw data as returned from the PostgreSQL server.
+// dest - the destination that values will be decoded into.
+func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, values [][]byte, dest ...any) error {
+ if len(fieldDescriptions) != len(values) {
+ return fmt.Errorf("number of field descriptions must equal number of values, got %d and %d", len(fieldDescriptions), len(values))
+ }
+ if len(fieldDescriptions) != len(dest) {
+ return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
+ }
+
+ for i, d := range dest {
+ if d == nil {
+ continue
+ }
+
+ err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
+ if err != nil {
+ return ScanArgError{ColumnIndex: i, Err: err}
+ }
+ }
+
+ return nil
+}
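+
+// A hedged sketch of decoding rows read via the lower level pgconn API
+// (rr is an assumed *pgconn.ResultReader and m an assumed *pgtype.Map):
+//
+//	for rr.NextRow() {
+//		var id int64
+//		var name string
+//		if err := pgx.ScanRow(m, rr.FieldDescriptions(), rr.Values(), &id, &name); err != nil {
+//			return err
+//		}
+//	}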
+
+// RowsFromResultReader returns a Rows that will read values from resultReader and decode them with typeMap. It can be
+// used to read from the lower level pgconn interface.
+func RowsFromResultReader(typeMap *pgtype.Map, resultReader *pgconn.ResultReader) Rows {
+ return &baseRows{
+ typeMap: typeMap,
+ resultReader: resultReader,
+ }
+}
+
+// ForEachRow iterates through rows. For each row it scans into the elements of scans and calls fn. If any row
+// fails to scan or fn returns an error, the query will be aborted and the error will be returned. Rows will be closed
+// when ForEachRow returns.
+func ForEachRow(rows Rows, scans []any, fn func() error) (pgconn.CommandTag, error) {
+ defer rows.Close()
+
+ for rows.Next() {
+ err := rows.Scan(scans...)
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ err = fn()
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
+ }
+
+ if err := rows.Err(); err != nil {
+ return pgconn.CommandTag{}, err
+ }
+
+ return rows.CommandTag(), nil
+}
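+
+// A minimal ForEachRow sketch (query and columns are placeholders):
+//
+//	var id int64
+//	var name string
+//	rows, _ := conn.Query(ctx, "select id, name from widgets")
+//	_, err := pgx.ForEachRow(rows, []any{&id, &name}, func() error {
+//		fmt.Println(id, name)
+//		return nil
+//	})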
+
+// CollectableRow is the subset of Rows methods that a RowToFunc is allowed to call.
+type CollectableRow interface {
+ FieldDescriptions() []pgconn.FieldDescription
+ Scan(dest ...any) error
+ Values() ([]any, error)
+ RawValues() [][]byte
+}
+
+// RowToFunc is a function that scans or otherwise converts row to a T.
+type RowToFunc[T any] func(row CollectableRow) (T, error)
+
+// AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T.
+//
+// This function closes the rows automatically on return.
+func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) {
+ defer rows.Close()
+
+ for rows.Next() {
+ value, err := fn(rows)
+ if err != nil {
+ return nil, err
+ }
+ slice = append(slice, value)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return slice, nil
+}
+
+// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
+//
+// This function closes the rows automatically on return.
+func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
+ return AppendRows([]T{}, rows, fn)
+}
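+
+// A minimal CollectRows sketch using the RowTo adapter defined below:
+//
+//	rows, _ := conn.Query(ctx, "select n from generate_series(1, 5) n")
+//	numbers, err := pgx.CollectRows(rows, pgx.RowTo[int32])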
+
+// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found, it returns an error
+// for which errors.Is(err, ErrNoRows) is true. CollectOneRow is to CollectRows as QueryRow is to Query.
+//
+// This function closes the rows automatically on return.
+func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
+ defer rows.Close()
+
+ var value T
+ var err error
+
+ if !rows.Next() {
+ if err = rows.Err(); err != nil {
+ return value, err
+ }
+ return value, ErrNoRows
+ }
+
+ value, err = fn(rows)
+ if err != nil {
+ return value, err
+ }
+
+ rows.Close()
+ return value, rows.Err()
+}
+
+// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
+// - If no rows are found, it returns an error for which errors.Is(err, ErrNoRows) is true.
+// - If more than one row is found, it returns an error for which errors.Is(err, ErrTooManyRows) is true.
+//
+// This function closes the rows automatically on return.
+func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
+ defer rows.Close()
+
+ var (
+ err error
+ value T
+ )
+
+ if !rows.Next() {
+ if err = rows.Err(); err != nil {
+ return value, err
+ }
+
+ return value, ErrNoRows
+ }
+
+ value, err = fn(rows)
+ if err != nil {
+ return value, err
+ }
+
+ if rows.Next() {
+ var zero T
+
+ return zero, ErrTooManyRows
+ }
+
+ return value, rows.Err()
+}
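+
+// A minimal sketch (table and column names are placeholders):
+//
+//	rows, _ := conn.Query(ctx, "select name from users where id=$1", id)
+//	name, err := pgx.CollectOneRow(rows, pgx.RowTo[string])
+//	// errors.Is(err, pgx.ErrNoRows) reports that no row matched; use
+//	// CollectExactlyOneRow instead to also reject multi-row results.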
+
+// RowTo returns a T scanned from row.
+func RowTo[T any](row CollectableRow) (T, error) {
+ var value T
+ err := row.Scan(&value)
+ return value, err
+}
+
+// RowToAddrOf returns the address of a T scanned from row.
+func RowToAddrOf[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := row.Scan(&value)
+ return &value, err
+}
+
+// RowToMap returns a map scanned from row.
+func RowToMap(row CollectableRow) (map[string]any, error) {
+ var value map[string]any
+ err := row.Scan((*mapRowScanner)(&value))
+ return value, err
+}
+
+type mapRowScanner map[string]any
+
+func (rs *mapRowScanner) ScanRow(rows Rows) error {
+ values, err := rows.Values()
+ if err != nil {
+ return err
+ }
+
+ *rs = make(mapRowScanner, len(values))
+
+ for i := range values {
+ (*rs)[string(rows.FieldDescriptions()[i].Name)] = values[i]
+ }
+
+ return nil
+}
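+
+// A minimal RowToMap sketch (the literal query is only illustrative):
+//
+//	rows, _ := conn.Query(ctx, "select 1 as id, 'alice' as name")
+//	m, err := pgx.CollectOneRow(rows, pgx.RowToMap)
+//	// m["id"] and m["name"] hold the decoded column values.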
+
+// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number of public fields as
+// row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will
+// be ignored.
+func RowToStructByPos[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return value, err
+}
+
+// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number
+// of public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-"
+// then the field will be ignored.
+func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return &value, err
+}
+
+type positionalStructRowScanner struct {
+ ptrToStruct any
+}
+
+func (rs *positionalStructRowScanner) ScanRow(rows CollectableRow) error {
+ typ := reflect.TypeOf(rs.ptrToStruct).Elem()
+ fields := lookupStructFields(typ)
+ if len(rows.RawValues()) > len(fields) {
+ return fmt.Errorf(
+ "got %d values, but dst struct has only %d fields",
+ len(rows.RawValues()),
+ len(fields),
+ )
+ }
+ scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
+ return rows.Scan(scanTargets...)
+}
+
+// Map from reflect.Type -> []structRowField
+var positionalStructFieldMap sync.Map
+
+func lookupStructFields(t reflect.Type) []structRowField {
+ if cached, ok := positionalStructFieldMap.Load(t); ok {
+ return cached.([]structRowField)
+ }
+
+ fieldStack := make([]int, 0, 1)
+ fields := computeStructFields(t, make([]structRowField, 0, t.NumField()), &fieldStack)
+ fieldsIface, _ := positionalStructFieldMap.LoadOrStore(t, fields)
+ return fieldsIface.([]structRowField)
+}
+
+func computeStructFields(
+ t reflect.Type,
+ fields []structRowField,
+ fieldStack *[]int,
+) []structRowField {
+ tail := len(*fieldStack)
+ *fieldStack = append(*fieldStack, 0)
+ for i := 0; i < t.NumField(); i++ {
+ sf := t.Field(i)
+ (*fieldStack)[tail] = i
+ // Handle anonymous struct embedding, but do not try to handle embedded pointers.
+ if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
+ fields = computeStructFields(sf.Type, fields, fieldStack)
+ } else if sf.PkgPath == "" {
+ dbTag, _ := sf.Tag.Lookup(structTagKey)
+ if dbTag == "-" {
+ // Field is ignored, skip it.
+ continue
+ }
+ fields = append(fields, structRowField{
+ path: append([]int(nil), *fieldStack...),
+ })
+ }
+ }
+ *fieldStack = (*fieldStack)[:tail]
+ return fields
+}
+
+// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
+// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
+// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
+func RowToStructByName[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return value, err
+}
+
+// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
+// of named public fields as row has fields. The row and T fields will be matched by name. The match is
+// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
+// then the field will be ignored.
+func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
+ return &value, err
+}
+
+// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have at least as many named public
+// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
+// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
+func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
+ return value, err
+}
+
+// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have at least as
+// many named public fields as row has fields. The row and T fields will be matched by name. The match is
+// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
+// then the field will be ignored.
+func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
+ var value T
+ err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
+ return &value, err
+}
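+
+// A hedged struct-mapping sketch (User and the query are assumptions; the
+// "db" tag overrides the matched column name):
+//
+//	type User struct {
+//		ID    int64
+//		Name  string
+//		Email string `db:"email_address"`
+//	}
+//
+//	rows, _ := conn.Query(ctx, "select id, name, email_address from users")
+//	users, err := pgx.CollectRows(rows, pgx.RowToStructByName[User])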
+
+type namedStructRowScanner struct {
+ ptrToStruct any
+ lax bool
+}
+
+func (rs *namedStructRowScanner) ScanRow(rows CollectableRow) error {
+ typ := reflect.TypeOf(rs.ptrToStruct).Elem()
+ fldDescs := rows.FieldDescriptions()
+ namedStructFields, err := lookupNamedStructFields(typ, fldDescs)
+ if err != nil {
+ return err
+ }
+ if !rs.lax && namedStructFields.missingField != "" {
+ return fmt.Errorf("cannot find field %s in returned row", namedStructFields.missingField)
+ }
+ fields := namedStructFields.fields
+ scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
+ return rows.Scan(scanTargets...)
+}
+
+// Map from namedStructFieldsKey -> *namedStructFields
+var namedStructFieldMap sync.Map
+
+type namedStructFieldsKey struct {
+ t reflect.Type
+ colNames string
+}
+
+type namedStructFields struct {
+ fields []structRowField
+ // missingField is the first field from the struct without a corresponding row field.
+ // This is used to construct the correct error message for non-lax queries.
+ missingField string
+}
+
+func lookupNamedStructFields(
+ t reflect.Type,
+ fldDescs []pgconn.FieldDescription,
+) (*namedStructFields, error) {
+ key := namedStructFieldsKey{
+ t: t,
+ colNames: joinFieldNames(fldDescs),
+ }
+ if cached, ok := namedStructFieldMap.Load(key); ok {
+ return cached.(*namedStructFields), nil
+ }
+
+	// We could probably do two levels of caching, where we compute the key -> fields mapping
+ // for a type only once, cache it by type, then use that to compute the column -> fields
+ // mapping for a given set of columns.
+ fieldStack := make([]int, 0, 1)
+ fields, missingField := computeNamedStructFields(
+ fldDescs,
+ t,
+ make([]structRowField, len(fldDescs)),
+ &fieldStack,
+ )
+ for i, f := range fields {
+ if f.path == nil {
+ return nil, fmt.Errorf(
+ "struct doesn't have corresponding row field %s",
+ fldDescs[i].Name,
+ )
+ }
+ }
+
+ fieldsIface, _ := namedStructFieldMap.LoadOrStore(
+ key,
+ &namedStructFields{fields: fields, missingField: missingField},
+ )
+ return fieldsIface.(*namedStructFields), nil
+}
+
+func joinFieldNames(fldDescs []pgconn.FieldDescription) string {
+ switch len(fldDescs) {
+ case 0:
+ return ""
+ case 1:
+ return fldDescs[0].Name
+ }
+
+ totalSize := len(fldDescs) - 1 // Space for separator bytes.
+ for _, d := range fldDescs {
+ totalSize += len(d.Name)
+ }
+ var b strings.Builder
+ b.Grow(totalSize)
+ b.WriteString(fldDescs[0].Name)
+ for _, d := range fldDescs[1:] {
+ b.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character.
+ b.WriteString(d.Name)
+ }
+ return b.String()
+}
+
+func computeNamedStructFields(
+ fldDescs []pgconn.FieldDescription,
+ t reflect.Type,
+ fields []structRowField,
+ fieldStack *[]int,
+) ([]structRowField, string) {
+ var missingField string
+ tail := len(*fieldStack)
+ *fieldStack = append(*fieldStack, 0)
+ for i := 0; i < t.NumField(); i++ {
+ sf := t.Field(i)
+ (*fieldStack)[tail] = i
+ if sf.PkgPath != "" && !sf.Anonymous {
+ // Field is unexported, skip it.
+ continue
+ }
+ // Handle anonymous struct embedding, but do not try to handle embedded pointers.
+ if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
+ var missingSubField string
+ fields, missingSubField = computeNamedStructFields(
+ fldDescs,
+ sf.Type,
+ fields,
+ fieldStack,
+ )
+ if missingField == "" {
+ missingField = missingSubField
+ }
+ } else {
+ dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
+ if dbTagPresent {
+ dbTag, _, _ = strings.Cut(dbTag, ",")
+ }
+ if dbTag == "-" {
+ // Field is ignored, skip it.
+ continue
+ }
+ colName := dbTag
+ if !dbTagPresent {
+ colName = sf.Name
+ }
+ fpos := fieldPosByName(fldDescs, colName)
+ if fpos == -1 {
+ if missingField == "" {
+ missingField = colName
+ }
+ continue
+ }
+ fields[fpos] = structRowField{
+ path: append([]int(nil), *fieldStack...),
+ }
+ }
+ }
+ *fieldStack = (*fieldStack)[:tail]
+
+ return fields, missingField
+}
+
+const structTagKey = "db"
+
+func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
+	i = -1
+
+	// Snake case support: compare names with all underscores removed.
+	field = strings.ReplaceAll(field, "_", "")
+
+	for i, desc := range fldDescs {
+		descName := strings.ReplaceAll(desc.Name, "_", "")
+		if strings.EqualFold(descName, field) {
+			return i
+		}
+	}
+	return
+}
+
+// structRowField describes a field of a struct.
+//
+// TODO: It would be a bit more efficient to track the path using the pointer
+// offset within the (outermost) struct and use unsafe.Pointer arithmetic to
+// construct references when scanning rows. However, it's not clear it's worth
+// using unsafe for this.
+type structRowField struct {
+ path []int
+}
+
+func setupStructScanTargets(receiver any, fields []structRowField) []any {
+ scanTargets := make([]any, len(fields))
+ v := reflect.ValueOf(receiver).Elem()
+ for i, f := range fields {
+ scanTargets[i] = v.FieldByIndex(f.path).Addr().Interface()
+ }
+ return scanTargets
+}
diff --git a/vendor/github.com/jackc/pgx/v5/stdlib/sql.go b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go
new file mode 100644
index 0000000..29cd3fb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go
@@ -0,0 +1,881 @@
+// Package stdlib is the compatibility layer from pgx to database/sql.
+//
+// A database/sql connection can be established through sql.Open.
+//
+// db, err := sql.Open("pgx", "postgres://pgx_md5:secret@localhost:5432/pgx_test?sslmode=disable")
+// if err != nil {
+// return err
+// }
+//
+// Or from a keyword/value string.
+//
+// db, err := sql.Open("pgx", "user=postgres password=secret host=localhost port=5432 database=pgx_test sslmode=disable")
+// if err != nil {
+// return err
+// }
+//
+// Or from a *pgxpool.Pool.
+//
+// pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
+// if err != nil {
+// return err
+// }
+//
+// db := stdlib.OpenDBFromPool(pool)
+//
+// Or a pgx.ConnConfig can be used to set configuration not accessible via connection string. In this case the
+// pgx.ConnConfig must first be registered with the driver. This registration returns a connection string which is used
+// with sql.Open.
+//
+// connConfig, _ := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
+// connConfig.Tracer = &tracelog.TraceLog{Logger: myLogger, LogLevel: tracelog.LogLevelInfo}
+// connStr := stdlib.RegisterConnConfig(connConfig)
+// db, _ := sql.Open("pgx", connStr)
+//
+// pgx uses standard PostgreSQL positional parameters in queries, e.g. $1, $2. It does not support named parameters.
+//
+// db.QueryRow("select * from users where id=$1", userID)
+//
+// (*sql.Conn) Raw() can be used to get a *pgx.Conn from the standard database/sql.DB connection pool. This allows
+// operations that use pgx specific functionality.
+//
+// // Given db is a *sql.DB
+// conn, err := db.Conn(context.Background())
+// if err != nil {
+// // handle error from acquiring connection from DB pool
+// }
+//
+// err = conn.Raw(func(driverConn any) error {
+// conn := driverConn.(*stdlib.Conn).Conn() // conn is a *pgx.Conn
+// // Do pgx specific stuff with conn
+// conn.CopyFrom(...)
+// return nil
+// })
+// if err != nil {
+// // handle error that occurred while using *pgx.Conn
+// }
+//
+// # PostgreSQL Specific Data Types
+//
+// The pgtype package provides support for PostgreSQL specific types. *pgtype.Map.SQLScanner is an adapter that makes
+// these types usable as a sql.Scanner.
+//
+// m := pgtype.NewMap()
+// var a []int64
+// err := db.QueryRow("select '{1,2,3}'::bigint[]").Scan(m.SQLScanner(&a))
+package stdlib
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgtype"
+ "github.com/jackc/pgx/v5/pgxpool"
+)
+
+// Only intrinsic types should use the binary format with database/sql.
+var databaseSQLResultFormats pgx.QueryResultFormatsByOID
+
+var pgxDriver *Driver
+
+func init() {
+ pgxDriver = &Driver{
+ configs: make(map[string]*pgx.ConnConfig),
+ }
+
+	// If the pgx driver was already registered by a different pgx major version, then we
+	// skip registration under the default name.
+ if !contains(sql.Drivers(), "pgx") {
+ sql.Register("pgx", pgxDriver)
+ }
+ sql.Register("pgx/v5", pgxDriver)
+
+ databaseSQLResultFormats = pgx.QueryResultFormatsByOID{
+ pgtype.BoolOID: 1,
+ pgtype.ByteaOID: 1,
+ pgtype.CIDOID: 1,
+ pgtype.DateOID: 1,
+ pgtype.Float4OID: 1,
+ pgtype.Float8OID: 1,
+ pgtype.Int2OID: 1,
+ pgtype.Int4OID: 1,
+ pgtype.Int8OID: 1,
+ pgtype.OIDOID: 1,
+ pgtype.TimestampOID: 1,
+ pgtype.TimestamptzOID: 1,
+ pgtype.XIDOID: 1,
+ }
+}
+
+// TODO: replace with slices.Contains once the experimental package is merged into the standard library.
+// https://pkg.go.dev/golang.org/x/exp/slices#Contains
+func contains(list []string, y string) bool {
+ for _, x := range list {
+ if x == y {
+ return true
+ }
+ }
+ return false
+}
+
+// OptionOpenDB options for configuring the driver when opening a new db pool.
+type OptionOpenDB func(*connector)
+
+// OptionBeforeConnect provides a callback for before connect. It is passed a shallow copy of the ConnConfig that will
+// be used to connect, so only its immediate members should be modified. Used only if db is opened with *pgx.ConnConfig.
+func OptionBeforeConnect(bc func(context.Context, *pgx.ConnConfig) error) OptionOpenDB {
+ return func(dc *connector) {
+ dc.BeforeConnect = bc
+ }
+}
+
+// OptionAfterConnect provides a callback for after connect. Used only if db is opened with *pgx.ConnConfig.
+func OptionAfterConnect(ac func(context.Context, *pgx.Conn) error) OptionOpenDB {
+ return func(dc *connector) {
+ dc.AfterConnect = ac
+ }
+}
+
+// OptionResetSession provides a callback that can be used to add custom logic prior to executing a query on the
+// connection if the connection has been used before.
+// If ResetSessionFunc returns ErrBadConn error the connection will be discarded.
+func OptionResetSession(rs func(context.Context, *pgx.Conn) error) OptionOpenDB {
+ return func(dc *connector) {
+ dc.ResetSession = rs
+ }
+}
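+
+// A hedged sketch (connConfig is an assumed, already parsed *pgx.ConnConfig;
+// the DISCARD ALL statement is only a placeholder for session cleanup):
+//
+//	db := stdlib.OpenDB(*connConfig, stdlib.OptionResetSession(func(ctx context.Context, conn *pgx.Conn) error {
+//		_, err := conn.Exec(ctx, "discard all")
+//		return err
+//	}))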
+
+// RandomizeHostOrderFunc is a BeforeConnect hook that randomizes the host order in the provided connConfig, so that a
+// new host becomes primary each time. This is useful to distribute connections for multi-master databases like
+// CockroachDB. If you use this you likely should set https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime as well
+// to ensure that connections are periodically rebalanced across your nodes.
+func RandomizeHostOrderFunc(ctx context.Context, connConfig *pgx.ConnConfig) error {
+ if len(connConfig.Fallbacks) == 0 {
+ return nil
+ }
+
+ newFallbacks := append([]*pgconn.FallbackConfig{{
+ Host: connConfig.Host,
+ Port: connConfig.Port,
+ TLSConfig: connConfig.TLSConfig,
+ }}, connConfig.Fallbacks...)
+
+ rand.Shuffle(len(newFallbacks), func(i, j int) {
+ newFallbacks[i], newFallbacks[j] = newFallbacks[j], newFallbacks[i]
+ })
+
+	// Use the one that shuffled last as the primary and keep the rest as the fallbacks.
+ newPrimary := newFallbacks[len(newFallbacks)-1]
+ connConfig.Host = newPrimary.Host
+ connConfig.Port = newPrimary.Port
+ connConfig.TLSConfig = newPrimary.TLSConfig
+ connConfig.Fallbacks = newFallbacks[:len(newFallbacks)-1]
+ return nil
+}
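+
+// A hedged wiring sketch: pass the hook through OptionBeforeConnect (the
+// multi-host connection string is a placeholder):
+//
+//	connConfig, _ := pgx.ParseConfig("postgres://host1,host2,host3/mydb")
+//	db := stdlib.OpenDB(*connConfig, stdlib.OptionBeforeConnect(stdlib.RandomizeHostOrderFunc))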
+
+func GetConnector(config pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector {
+ c := connector{
+ ConnConfig: config,
+ BeforeConnect: func(context.Context, *pgx.ConnConfig) error { return nil }, // noop before connect by default
+ AfterConnect: func(context.Context, *pgx.Conn) error { return nil }, // noop after connect by default
+ ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default
+ driver: pgxDriver,
+ }
+
+ for _, opt := range opts {
+ opt(&c)
+ }
+ return c
+}
+
+// GetPoolConnector creates a new driver.Connector from the given *pgxpool.Pool. By using this be sure to set the
+// maximum idle connections of the *sql.DB created with this connector to zero since they must be managed from the
+// *pgxpool.Pool. This is required to avoid acquiring all the connections from the pgxpool and starving any direct
+// users of the pgxpool.
+func GetPoolConnector(pool *pgxpool.Pool, opts ...OptionOpenDB) driver.Connector {
+ c := connector{
+ pool: pool,
+ ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default
+ driver: pgxDriver,
+ }
+
+ for _, opt := range opts {
+ opt(&c)
+ }
+
+ return c
+}
+
+func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
+ c := GetConnector(config, opts...)
+ return sql.OpenDB(c)
+}
+
+// OpenDBFromPool creates a new *sql.DB from the given *pgxpool.Pool. Note that this method automatically sets the
+// maximum number of idle connections in *sql.DB to zero, since they must be managed from the *pgxpool.Pool. This is
+// required to avoid acquiring all the connections from the pgxpool and starving any direct users of the pgxpool.
+func OpenDBFromPool(pool *pgxpool.Pool, opts ...OptionOpenDB) *sql.DB {
+ c := GetPoolConnector(pool, opts...)
+ db := sql.OpenDB(c)
+ db.SetMaxIdleConns(0)
+ return db
+}
+
+type connector struct {
+ pgx.ConnConfig
+ pool *pgxpool.Pool
+ BeforeConnect func(context.Context, *pgx.ConnConfig) error // function to call before creation of every new connection
+ AfterConnect func(context.Context, *pgx.Conn) error // function to call after creation of every new connection
+ ResetSession func(context.Context, *pgx.Conn) error // function is called before a connection is reused
+ driver *Driver
+}
+
+// Connect implements the driver.Connector interface.
+func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
+ var (
+ connConfig pgx.ConnConfig
+ conn *pgx.Conn
+ close func(context.Context) error
+ err error
+ )
+
+ if c.pool == nil {
+ // Create a shallow copy of the config, so that BeforeConnect can safely modify it
+ connConfig = c.ConnConfig
+
+ if err = c.BeforeConnect(ctx, &connConfig); err != nil {
+ return nil, err
+ }
+
+ if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil {
+ return nil, err
+ }
+
+ if err = c.AfterConnect(ctx, conn); err != nil {
+ return nil, err
+ }
+
+ close = conn.Close
+ } else {
+ var pconn *pgxpool.Conn
+
+ pconn, err = c.pool.Acquire(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ conn = pconn.Conn()
+
+ close = func(_ context.Context) error {
+ pconn.Release()
+ return nil
+ }
+ }
+
+ return &Conn{
+ conn: conn,
+ close: close,
+ driver: c.driver,
+ connConfig: connConfig,
+ resetSessionFunc: c.ResetSession,
+ psRefCounts: make(map[*pgconn.StatementDescription]int),
+ }, nil
+}
+
+// Driver implements the driver.Connector interface.
+func (c connector) Driver() driver.Driver {
+ return c.driver
+}
+
+// GetDefaultDriver returns the driver initialized in the init function
+// and used when the pgx driver is registered.
+func GetDefaultDriver() driver.Driver {
+ return pgxDriver
+}
+
+type Driver struct {
+ configMutex sync.Mutex
+ configs map[string]*pgx.ConnConfig
+ sequence int
+}
+
+func (d *Driver) Open(name string) (driver.Conn, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // Ensure eventual timeout
+ defer cancel()
+
+ connector, err := d.OpenConnector(name)
+ if err != nil {
+ return nil, err
+ }
+ return connector.Connect(ctx)
+}
+
+func (d *Driver) OpenConnector(name string) (driver.Connector, error) {
+ return &driverConnector{driver: d, name: name}, nil
+}
+
+func (d *Driver) registerConnConfig(c *pgx.ConnConfig) string {
+ d.configMutex.Lock()
+ connStr := fmt.Sprintf("registeredConnConfig%d", d.sequence)
+ d.sequence++
+ d.configs[connStr] = c
+ d.configMutex.Unlock()
+ return connStr
+}
+
+func (d *Driver) unregisterConnConfig(connStr string) {
+ d.configMutex.Lock()
+ delete(d.configs, connStr)
+ d.configMutex.Unlock()
+}
+
+type driverConnector struct {
+ driver *Driver
+ name string
+}
+
+func (dc *driverConnector) Connect(ctx context.Context) (driver.Conn, error) {
+ var connConfig *pgx.ConnConfig
+
+ dc.driver.configMutex.Lock()
+ connConfig = dc.driver.configs[dc.name]
+ dc.driver.configMutex.Unlock()
+
+ if connConfig == nil {
+ var err error
+ connConfig, err = pgx.ParseConfig(dc.name)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ conn, err := pgx.ConnectConfig(ctx, connConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ c := &Conn{
+ conn: conn,
+ close: conn.Close,
+ driver: dc.driver,
+ connConfig: *connConfig,
+ resetSessionFunc: func(context.Context, *pgx.Conn) error { return nil },
+ psRefCounts: make(map[*pgconn.StatementDescription]int),
+ }
+
+ return c, nil
+}
+
+func (dc *driverConnector) Driver() driver.Driver {
+ return dc.driver
+}
+
+// RegisterConnConfig registers a ConnConfig and returns the connection string to use with Open.
+func RegisterConnConfig(c *pgx.ConnConfig) string {
+ return pgxDriver.registerConnConfig(c)
+}
+
+// UnregisterConnConfig removes the ConnConfig registration for connStr.
+func UnregisterConnConfig(connStr string) {
+ pgxDriver.unregisterConnConfig(connStr)
+}
+
+type Conn struct {
+ conn *pgx.Conn
+ close func(context.Context) error
+ driver *Driver
+ connConfig pgx.ConnConfig
+ resetSessionFunc func(context.Context, *pgx.Conn) error // Function is called before a connection is reused
+ lastResetSessionTime time.Time
+
+ // psRefCounts contains reference counts for prepared statements. Prepare uses the underlying pgx logic to generate
+ // deterministic statement names from the statement text. If this query has already been prepared then the existing
+ // *pgconn.StatementDescription will be returned. However, this means that if Close is called on the returned Stmt
+ // then the underlying prepared statement will be closed even when the underlying prepared statement is still in use
+ // by another database/sql Stmt. To prevent this psRefCounts keeps track of how many database/sql statements are using
+ // the same underlying statement and only closes the underlying statement when the reference count reaches 0.
+ psRefCounts map[*pgconn.StatementDescription]int
+}
+
+// Conn returns the underlying *pgx.Conn
+func (c *Conn) Conn() *pgx.Conn {
+ return c.conn
+}
+
+func (c *Conn) Prepare(query string) (driver.Stmt, error) {
+ return c.PrepareContext(context.Background(), query)
+}
+
+func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if c.conn.IsClosed() {
+ return nil, driver.ErrBadConn
+ }
+
+ sd, err := c.conn.Prepare(ctx, query, query)
+ if err != nil {
+ return nil, err
+ }
+ c.psRefCounts[sd]++
+
+ return &Stmt{sd: sd, conn: c}, nil
+}
+
+func (c *Conn) Close() error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ return c.close(ctx)
+}
+
+func (c *Conn) Begin() (driver.Tx, error) {
+ return c.BeginTx(context.Background(), driver.TxOptions{})
+}
+
+func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if c.conn.IsClosed() {
+ return nil, driver.ErrBadConn
+ }
+
+ var pgxOpts pgx.TxOptions
+ switch sql.IsolationLevel(opts.Isolation) {
+ case sql.LevelDefault:
+ case sql.LevelReadUncommitted:
+ pgxOpts.IsoLevel = pgx.ReadUncommitted
+ case sql.LevelReadCommitted:
+ pgxOpts.IsoLevel = pgx.ReadCommitted
+ case sql.LevelRepeatableRead, sql.LevelSnapshot:
+ pgxOpts.IsoLevel = pgx.RepeatableRead
+ case sql.LevelSerializable:
+ pgxOpts.IsoLevel = pgx.Serializable
+ default:
+ return nil, fmt.Errorf("unsupported isolation: %v", opts.Isolation)
+ }
+
+ if opts.ReadOnly {
+ pgxOpts.AccessMode = pgx.ReadOnly
+ }
+
+ tx, err := c.conn.BeginTx(ctx, pgxOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapTx{ctx: ctx, tx: tx}, nil
+}
+
+func (c *Conn) ExecContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Result, error) {
+ if c.conn.IsClosed() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := namedValueToInterface(argsV)
+
+ commandTag, err := c.conn.Exec(ctx, query, args...)
+ // if we got a network error before we had a chance to send the query, retry
+ if err != nil {
+ if pgconn.SafeToRetry(err) {
+ return nil, driver.ErrBadConn
+ }
+ }
+ return driver.RowsAffected(commandTag.RowsAffected()), err
+}
+
+func (c *Conn) QueryContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Rows, error) {
+ if c.conn.IsClosed() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := []any{databaseSQLResultFormats}
+ args = append(args, namedValueToInterface(argsV)...)
+
+ rows, err := c.conn.Query(ctx, query, args...)
+ if err != nil {
+ if pgconn.SafeToRetry(err) {
+ return nil, driver.ErrBadConn
+ }
+ return nil, err
+ }
+
+ // Preload first row because otherwise we won't know what columns are available when database/sql asks.
+ more := rows.Next()
+ if err = rows.Err(); err != nil {
+ rows.Close()
+ return nil, err
+ }
+ return &Rows{conn: c, rows: rows, skipNext: true, skipNextMore: more}, nil
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ if c.conn.IsClosed() {
+ return driver.ErrBadConn
+ }
+
+ err := c.conn.Ping(ctx)
+ if err != nil {
+ // A Ping failure implies some sort of fatal state. The connection is almost certainly already closed by the
+ // failure, but manually close it just to be sure.
+ c.Close()
+ return driver.ErrBadConn
+ }
+
+ return nil
+}
+
+func (c *Conn) CheckNamedValue(*driver.NamedValue) error {
+ // Underlying pgx supports sql.Scanner and driver.Valuer interfaces natively. So everything can be passed through directly.
+ return nil
+}
+
+func (c *Conn) ResetSession(ctx context.Context) error {
+ if c.conn.IsClosed() {
+ return driver.ErrBadConn
+ }
+
+ now := time.Now()
+ if now.Sub(c.lastResetSessionTime) > time.Second {
+ if err := c.conn.PgConn().Ping(ctx); err != nil {
+ return driver.ErrBadConn
+ }
+ }
+ c.lastResetSessionTime = now
+
+ return c.resetSessionFunc(ctx, c.conn)
+}
+
+type Stmt struct {
+ sd *pgconn.StatementDescription
+ conn *Conn
+}
+
+func (s *Stmt) Close() error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ refCount := s.conn.psRefCounts[s.sd]
+ if refCount == 1 {
+ delete(s.conn.psRefCounts, s.sd)
+ } else {
+ s.conn.psRefCounts[s.sd]--
+ return nil
+ }
+
+ return s.conn.conn.Deallocate(ctx, s.sd.SQL)
+}
+
+func (s *Stmt) NumInput() int {
+ return len(s.sd.ParamOIDs)
+}
+
+func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
+ return nil, errors.New("Stmt.Exec deprecated and not implemented")
+}
+
+func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
+ return s.conn.ExecContext(ctx, s.sd.SQL, argsV)
+}
+
+func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
+ return nil, errors.New("Stmt.Query deprecated and not implemented")
+}
+
+func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
+ return s.conn.QueryContext(ctx, s.sd.SQL, argsV)
+}
+
+type rowValueFunc func(src []byte) (driver.Value, error)
+
+type Rows struct {
+ conn *Conn
+ rows pgx.Rows
+ valueFuncs []rowValueFunc
+ skipNext bool
+ skipNextMore bool
+
+ columnNames []string
+}
+
+func (r *Rows) Columns() []string {
+ if r.columnNames == nil {
+ fields := r.rows.FieldDescriptions()
+ r.columnNames = make([]string, len(fields))
+ for i, fd := range fields {
+ r.columnNames[i] = string(fd.Name)
+ }
+ }
+
+ return r.columnNames
+}
+
+// ColumnTypeDatabaseTypeName returns the database system type name. If the name is unknown the OID is returned.
+func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
+ if dt, ok := r.conn.conn.TypeMap().TypeForOID(r.rows.FieldDescriptions()[index].DataTypeOID); ok {
+ return strings.ToUpper(dt.Name)
+ }
+
+ return strconv.FormatInt(int64(r.rows.FieldDescriptions()[index].DataTypeOID), 10)
+}
+
+const varHeaderSize = 4
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable length type. If the column is not a variable length type, ok
+// should be false.
+func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
+ fd := r.rows.FieldDescriptions()[index]
+
+ switch fd.DataTypeOID {
+ case pgtype.TextOID, pgtype.ByteaOID:
+ return math.MaxInt64, true
+ case pgtype.VarcharOID, pgtype.BPCharArrayOID:
+ return int64(fd.TypeModifier - varHeaderSize), true
+ default:
+ return 0, false
+ }
+}
+
+// ColumnTypePrecisionScale should return the precision and scale for decimal
+// types. If not applicable, ok should be false.
+func (r *Rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+ fd := r.rows.FieldDescriptions()[index]
+
+ switch fd.DataTypeOID {
+ case pgtype.NumericOID:
+ mod := fd.TypeModifier - varHeaderSize
+ precision = int64((mod >> 16) & 0xffff)
+ scale = int64(mod & 0xffff)
+ return precision, scale, true
+ default:
+ return 0, 0, false
+ }
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types into.
+func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
+ fd := r.rows.FieldDescriptions()[index]
+
+ switch fd.DataTypeOID {
+ case pgtype.Float8OID:
+ return reflect.TypeOf(float64(0))
+ case pgtype.Float4OID:
+ return reflect.TypeOf(float32(0))
+ case pgtype.Int8OID:
+ return reflect.TypeOf(int64(0))
+ case pgtype.Int4OID:
+ return reflect.TypeOf(int32(0))
+ case pgtype.Int2OID:
+ return reflect.TypeOf(int16(0))
+ case pgtype.BoolOID:
+ return reflect.TypeOf(false)
+ case pgtype.NumericOID:
+ return reflect.TypeOf(float64(0))
+ case pgtype.DateOID, pgtype.TimestampOID, pgtype.TimestamptzOID:
+ return reflect.TypeOf(time.Time{})
+ case pgtype.ByteaOID:
+ return reflect.TypeOf([]byte(nil))
+ default:
+ return reflect.TypeOf("")
+ }
+}
+
+func (r *Rows) Close() error {
+ r.rows.Close()
+ return r.rows.Err()
+}
+
+func (r *Rows) Next(dest []driver.Value) error {
+ m := r.conn.conn.TypeMap()
+ fieldDescriptions := r.rows.FieldDescriptions()
+
+ if r.valueFuncs == nil {
+ r.valueFuncs = make([]rowValueFunc, len(fieldDescriptions))
+
+ for i, fd := range fieldDescriptions {
+ dataTypeOID := fd.DataTypeOID
+ format := fd.Format
+
+ switch fd.DataTypeOID {
+ case pgtype.BoolOID:
+ var d bool
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return d, err
+ }
+ case pgtype.ByteaOID:
+ var d []byte
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return d, err
+ }
+ case pgtype.CIDOID, pgtype.OIDOID, pgtype.XIDOID:
+ var d pgtype.Uint32
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d.Value()
+ }
+ case pgtype.DateOID:
+ var d pgtype.Date
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d.Value()
+ }
+ case pgtype.Float4OID:
+ var d float32
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return float64(d), err
+ }
+ case pgtype.Float8OID:
+ var d float64
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return d, err
+ }
+ case pgtype.Int2OID:
+ var d int16
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return int64(d), err
+ }
+ case pgtype.Int4OID:
+ var d int32
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return int64(d), err
+ }
+ case pgtype.Int8OID:
+ var d int64
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return d, err
+ }
+ case pgtype.JSONOID, pgtype.JSONBOID:
+ var d []byte
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ }
+ case pgtype.TimestampOID:
+ var d pgtype.Timestamp
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d.Value()
+ }
+ case pgtype.TimestamptzOID:
+ var d pgtype.Timestamptz
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d.Value()
+ }
+ default:
+ var d string
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ return d, err
+ }
+ }
+ }
+ }
+
+ var more bool
+ if r.skipNext {
+ more = r.skipNextMore
+ r.skipNext = false
+ } else {
+ more = r.rows.Next()
+ }
+
+ if !more {
+		if err := r.rows.Err(); err != nil {
+			return err
+		}
+		return io.EOF
+ }
+
+ for i, rv := range r.rows.RawValues() {
+ if rv != nil {
+ var err error
+ dest[i], err = r.valueFuncs[i](rv)
+ if err != nil {
+ return fmt.Errorf("convert field %d failed: %w", i, err)
+ }
+ } else {
+ dest[i] = nil
+ }
+ }
+
+ return nil
+}
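
Next builds one decode closure per column on the first call, caching a pgtype scan plan and reusing it for every subsequent row. A hedged sketch of that pattern in isolation, using pgtype directly with hand-rolled text-format values:

    package main

    import (
        "fmt"
        "log"

        "github.com/jackc/pgx/v5/pgtype"
    )

    func main() {
        m := pgtype.NewMap()

        // Plan the scan once, as Next does per column...
        var d int32
        plan := m.PlanScan(pgtype.Int4OID, pgtype.TextFormatCode, &d)

        // ...then reuse the plan for each row's raw value.
        for _, raw := range [][]byte{[]byte("1"), []byte("2"), []byte("3")} {
            if err := plan.Scan(raw, &d); err != nil {
                log.Fatal(err)
            }
            fmt.Println(int64(d)) // widened to int64, as driver.Value expects
        }
    }
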
+
+func valueToInterface(argsV []driver.Value) []any {
+ args := make([]any, 0, len(argsV))
+ for _, v := range argsV {
+ if v != nil {
+			args = append(args, v)
+ } else {
+ args = append(args, nil)
+ }
+ }
+ return args
+}
+
+func namedValueToInterface(argsV []driver.NamedValue) []any {
+ args := make([]any, 0, len(argsV))
+ for _, v := range argsV {
+ if v.Value != nil {
+			args = append(args, v.Value)
+ } else {
+ args = append(args, nil)
+ }
+ }
+ return args
+}
+
+type wrapTx struct {
+ ctx context.Context
+ tx pgx.Tx
+}
+
+func (wtx wrapTx) Commit() error { return wtx.tx.Commit(wtx.ctx) }
+
+func (wtx wrapTx) Rollback() error { return wtx.tx.Rollback(wtx.ctx) }
diff --git a/vendor/github.com/jackc/pgx/v5/tracer.go b/vendor/github.com/jackc/pgx/v5/tracer.go
new file mode 100644
index 0000000..58ca99f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/tracer.go
@@ -0,0 +1,107 @@
+package pgx
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// QueryTracer traces Query, QueryRow, and Exec.
+type QueryTracer interface {
+ // TraceQueryStart is called at the beginning of Query, QueryRow, and Exec calls. The returned context is used for the
+ // rest of the call and will be passed to TraceQueryEnd.
+ TraceQueryStart(ctx context.Context, conn *Conn, data TraceQueryStartData) context.Context
+
+ TraceQueryEnd(ctx context.Context, conn *Conn, data TraceQueryEndData)
+}
+
+type TraceQueryStartData struct {
+ SQL string
+ Args []any
+}
+
+type TraceQueryEndData struct {
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+// BatchTracer traces SendBatch.
+type BatchTracer interface {
+ // TraceBatchStart is called at the beginning of SendBatch calls. The returned context is used for the
+ // rest of the call and will be passed to TraceBatchQuery and TraceBatchEnd.
+ TraceBatchStart(ctx context.Context, conn *Conn, data TraceBatchStartData) context.Context
+
+ TraceBatchQuery(ctx context.Context, conn *Conn, data TraceBatchQueryData)
+ TraceBatchEnd(ctx context.Context, conn *Conn, data TraceBatchEndData)
+}
+
+type TraceBatchStartData struct {
+ Batch *Batch
+}
+
+type TraceBatchQueryData struct {
+ SQL string
+ Args []any
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+type TraceBatchEndData struct {
+ Err error
+}
+
+// CopyFromTracer traces CopyFrom.
+type CopyFromTracer interface {
+ // TraceCopyFromStart is called at the beginning of CopyFrom calls. The returned context is used for the
+ // rest of the call and will be passed to TraceCopyFromEnd.
+ TraceCopyFromStart(ctx context.Context, conn *Conn, data TraceCopyFromStartData) context.Context
+
+ TraceCopyFromEnd(ctx context.Context, conn *Conn, data TraceCopyFromEndData)
+}
+
+type TraceCopyFromStartData struct {
+ TableName Identifier
+ ColumnNames []string
+}
+
+type TraceCopyFromEndData struct {
+ CommandTag pgconn.CommandTag
+ Err error
+}
+
+// PrepareTracer traces Prepare.
+type PrepareTracer interface {
+ // TracePrepareStart is called at the beginning of Prepare calls. The returned context is used for the
+ // rest of the call and will be passed to TracePrepareEnd.
+ TracePrepareStart(ctx context.Context, conn *Conn, data TracePrepareStartData) context.Context
+
+ TracePrepareEnd(ctx context.Context, conn *Conn, data TracePrepareEndData)
+}
+
+type TracePrepareStartData struct {
+ Name string
+ SQL string
+}
+
+type TracePrepareEndData struct {
+ AlreadyPrepared bool
+ Err error
+}
+
+// ConnectTracer traces Connect and ConnectConfig.
+type ConnectTracer interface {
+ // TraceConnectStart is called at the beginning of Connect and ConnectConfig calls. The returned context is used for
+ // the rest of the call and will be passed to TraceConnectEnd.
+ TraceConnectStart(ctx context.Context, data TraceConnectStartData) context.Context
+
+ TraceConnectEnd(ctx context.Context, data TraceConnectEndData)
+}
+
+type TraceConnectStartData struct {
+ ConnConfig *ConnConfig
+}
+
+type TraceConnectEndData struct {
+ Conn *Conn
+ Err error
+}
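
For illustration, a minimal QueryTracer that logs each query's latency; the tracer type, context key, and DSN below are invented for the example. ConnConfig.Tracer accepts any QueryTracer:

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/jackc/pgx/v5"
    )

    type ctxKey struct{} // private context key for the example

    // logTracer is a hypothetical QueryTracer measuring query latency.
    type logTracer struct{}

    func (logTracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
        // Stash the start time; TraceQueryEnd receives this same context.
        log.Printf("query start: %s", data.SQL)
        return context.WithValue(ctx, ctxKey{}, time.Now())
    }

    func (logTracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) {
        if start, ok := ctx.Value(ctxKey{}).(time.Time); ok {
            log.Printf("query end: %s in %s err=%v", data.CommandTag, time.Since(start), data.Err)
        }
    }

    func main() {
        cfg, err := pgx.ParseConfig("postgres://localhost/postgres") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        cfg.Tracer = logTracer{}

        conn, err := pgx.ConnectConfig(context.Background(), cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close(context.Background())

        _, _ = conn.Exec(context.Background(), "select 1")
    }
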
diff --git a/vendor/github.com/jackc/pgx/v5/tx.go b/vendor/github.com/jackc/pgx/v5/tx.go
new file mode 100644
index 0000000..8feeb51
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/tx.go
@@ -0,0 +1,432 @@
+package pgx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/pgconn"
+)
+
+// TxIsoLevel is the transaction isolation level (serializable, repeatable read, read committed or read uncommitted)
+type TxIsoLevel string
+
+// Transaction isolation levels
+const (
+ Serializable TxIsoLevel = "serializable"
+ RepeatableRead TxIsoLevel = "repeatable read"
+ ReadCommitted TxIsoLevel = "read committed"
+ ReadUncommitted TxIsoLevel = "read uncommitted"
+)
+
+// TxAccessMode is the transaction access mode (read write or read only)
+type TxAccessMode string
+
+// Transaction access modes
+const (
+ ReadWrite TxAccessMode = "read write"
+ ReadOnly TxAccessMode = "read only"
+)
+
+// TxDeferrableMode is the transaction deferrable mode (deferrable or not deferrable)
+type TxDeferrableMode string
+
+// Transaction deferrable modes
+const (
+ Deferrable TxDeferrableMode = "deferrable"
+ NotDeferrable TxDeferrableMode = "not deferrable"
+)
+
+// TxOptions are transaction modes within a transaction block
+type TxOptions struct {
+ IsoLevel TxIsoLevel
+ AccessMode TxAccessMode
+ DeferrableMode TxDeferrableMode
+
+ // BeginQuery is the SQL query that will be executed to begin the transaction. This allows using non-standard syntax
+	// such as BEGIN PRIORITY HIGH with CockroachDB. If set, this will override the other settings.
+ BeginQuery string
+}
+
+var emptyTxOptions TxOptions
+
+func (txOptions TxOptions) beginSQL() string {
+ if txOptions == emptyTxOptions {
+ return "begin"
+ }
+
+ if txOptions.BeginQuery != "" {
+ return txOptions.BeginQuery
+ }
+
+ var buf strings.Builder
+	buf.Grow(64) // 64 is the maximum length of the string built from the available options
+ buf.WriteString("begin")
+
+ if txOptions.IsoLevel != "" {
+ buf.WriteString(" isolation level ")
+ buf.WriteString(string(txOptions.IsoLevel))
+ }
+ if txOptions.AccessMode != "" {
+ buf.WriteByte(' ')
+ buf.WriteString(string(txOptions.AccessMode))
+ }
+ if txOptions.DeferrableMode != "" {
+ buf.WriteByte(' ')
+ buf.WriteString(string(txOptions.DeferrableMode))
+ }
+
+ return buf.String()
+}
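
As a worked example of the strings beginSQL produces: the zero value yields "begin", a non-empty BeginQuery is returned verbatim, and the options in the sketch below (DSN is a placeholder) make BeginTx execute "begin isolation level serializable read only":

    package main

    import (
        "context"
        "log"

        "github.com/jackc/pgx/v5"
    )

    func main() {
        ctx := context.Background()
        conn, err := pgx.Connect(ctx, "postgres://localhost/postgres") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close(ctx)

        // beginSQL renders these options as
        // "begin isolation level serializable read only".
        tx, err := conn.BeginTx(ctx, pgx.TxOptions{
            IsoLevel:   pgx.Serializable,
            AccessMode: pgx.ReadOnly,
        })
        if err != nil {
            log.Fatal(err)
        }
        defer tx.Rollback(ctx) // safe even after Commit; see Rollback's contract

        if err := tx.Commit(ctx); err != nil {
            log.Fatal(err)
        }
    }
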
+
+var ErrTxClosed = errors.New("tx is closed")
+
+// ErrTxCommitRollback occurs when an error has occurred in a transaction and
+// Commit() is called. PostgreSQL accepts COMMIT on aborted transactions, but
+// it is treated as ROLLBACK.
+var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
+
+// Begin starts a transaction. Unlike database/sql, the context only affects the begin command; i.e., there is no
+// auto-rollback on context cancellation.
+func (c *Conn) Begin(ctx context.Context) (Tx, error) {
+ return c.BeginTx(ctx, TxOptions{})
+}
+
+// BeginTx starts a transaction with txOptions determining the transaction mode. Unlike database/sql, the context
+// only affects the begin command; i.e., there is no auto-rollback on context cancellation.
+func (c *Conn) BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error) {
+ _, err := c.Exec(ctx, txOptions.beginSQL())
+ if err != nil {
+ // begin should never fail unless there is an underlying connection issue or
+ // a context timeout. In either case, the connection is possibly broken.
+ c.die(errors.New("failed to begin transaction"))
+ return nil, err
+ }
+
+ return &dbTx{conn: c}, nil
+}
+
+// Tx represents a database transaction.
+//
+// Tx is an interface instead of a struct to enable connection pools to be implemented without relying on internal pgx
+// state, to support pseudo-nested transactions with savepoints, and to allow tests to mock transactions. However,
+// adding a method to an interface is technically a breaking change. If new methods are added to Conn it may be
+// desirable to add them to Tx as well. Because of this the Tx interface is partially excluded from semantic version
+// requirements. Methods will not be removed or changed, but new methods may be added.
+type Tx interface {
+ // Begin starts a pseudo nested transaction.
+ Begin(ctx context.Context) (Tx, error)
+
+ // Commit commits the transaction if this is a real transaction or releases the savepoint if this is a pseudo nested
+ // transaction. Commit will return an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is
+ // otherwise safe to call multiple times. If the commit fails with a rollback status (e.g. the transaction was already
+ // in a broken state) then an error where errors.Is(ErrTxCommitRollback) is true will be returned.
+ Commit(ctx context.Context) error
+
+ // Rollback rolls back the transaction if this is a real transaction or rolls back to the savepoint if this is a
+ // pseudo nested transaction. Rollback will return an error where errors.Is(ErrTxClosed) is true if the Tx is already
+ // closed, but is otherwise safe to call multiple times. Hence, a defer tx.Rollback() is safe even if tx.Commit() will
+ // be called first in a non-error condition. Any other failure of a real transaction will result in the connection
+ // being closed.
+ Rollback(ctx context.Context) error
+
+ CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error)
+ SendBatch(ctx context.Context, b *Batch) BatchResults
+ LargeObjects() LargeObjects
+
+ Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error)
+
+ Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error)
+ Query(ctx context.Context, sql string, args ...any) (Rows, error)
+ QueryRow(ctx context.Context, sql string, args ...any) Row
+
+	// Conn returns the underlying *Conn on which this transaction is executing.
+ Conn() *Conn
+}
+
+// dbTx represents a database transaction.
+//
+// All dbTx methods return ErrTxClosed if Commit or Rollback has already been
+// called on the dbTx.
+type dbTx struct {
+ conn *Conn
+ savepointNum int64
+ closed bool
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (tx *dbTx) Begin(ctx context.Context) (Tx, error) {
+ if tx.closed {
+ return nil, ErrTxClosed
+ }
+
+ tx.savepointNum++
+ _, err := tx.conn.Exec(ctx, "savepoint sp_"+strconv.FormatInt(tx.savepointNum, 10))
+ if err != nil {
+ return nil, err
+ }
+
+ return &dbSimulatedNestedTx{tx: tx, savepointNum: tx.savepointNum}, nil
+}
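
A hedged sketch of what this enables (placeholder DSN and table): the inner Begin issues "savepoint sp_1", and rolling the inner transaction back issues "rollback to savepoint sp_1", leaving the outer transaction intact:

    package main

    import (
        "context"
        "log"

        "github.com/jackc/pgx/v5"
    )

    func main() {
        ctx := context.Background()
        conn, err := pgx.Connect(ctx, "postgres://localhost/postgres") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close(ctx)

        outer, err := conn.Begin(ctx) // real transaction: "begin"
        if err != nil {
            log.Fatal(err)
        }
        defer outer.Rollback(ctx)

        inner, err := outer.Begin(ctx) // pseudo nested: "savepoint sp_1"
        if err != nil {
            log.Fatal(err)
        }
        if _, err := inner.Exec(ctx, "create temporary table t (a int)"); err != nil {
            log.Fatal(err)
        }
        _ = inner.Rollback(ctx) // "rollback to savepoint sp_1"; outer stays open

        if err := outer.Commit(ctx); err != nil {
            log.Fatal(err)
        }
    }
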
+
+// Commit commits the transaction.
+func (tx *dbTx) Commit(ctx context.Context) error {
+ if tx.closed {
+ return ErrTxClosed
+ }
+
+ commandTag, err := tx.conn.Exec(ctx, "commit")
+ tx.closed = true
+ if err != nil {
+ if tx.conn.PgConn().TxStatus() != 'I' {
+ _ = tx.conn.Close(ctx) // already have error to return
+ }
+ return err
+ }
+ if commandTag.String() == "ROLLBACK" {
+ return ErrTxCommitRollback
+ }
+
+ return nil
+}
+
+// Rollback rolls back the transaction. Rollback will return ErrTxClosed if the
+// Tx is already closed, but is otherwise safe to call multiple times. Hence, a
+// defer tx.Rollback() is safe even if tx.Commit() will be called first in a
+// non-error condition.
+func (tx *dbTx) Rollback(ctx context.Context) error {
+ if tx.closed {
+ return ErrTxClosed
+ }
+
+ _, err := tx.conn.Exec(ctx, "rollback")
+ tx.closed = true
+ if err != nil {
+ // A rollback failure leaves the connection in an undefined state
+ tx.conn.die(fmt.Errorf("rollback failed: %w", err))
+ return err
+ }
+
+ return nil
+}
+
+// Exec delegates to the underlying *Conn
+func (tx *dbTx) Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ if tx.closed {
+ return pgconn.CommandTag{}, ErrTxClosed
+ }
+
+ return tx.conn.Exec(ctx, sql, arguments...)
+}
+
+// Prepare delegates to the underlying *Conn
+func (tx *dbTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ if tx.closed {
+ return nil, ErrTxClosed
+ }
+
+ return tx.conn.Prepare(ctx, name, sql)
+}
+
+// Query delegates to the underlying *Conn
+func (tx *dbTx) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if tx.closed {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ err := ErrTxClosed
+ return &baseRows{closed: true, err: err}, err
+ }
+
+ return tx.conn.Query(ctx, sql, args...)
+}
+
+// QueryRow delegates to the underlying *Conn
+func (tx *dbTx) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := tx.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// CopyFrom delegates to the underlying *Conn
+func (tx *dbTx) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ if tx.closed {
+ return 0, ErrTxClosed
+ }
+
+ return tx.conn.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// SendBatch delegates to the underlying *Conn
+func (tx *dbTx) SendBatch(ctx context.Context, b *Batch) BatchResults {
+ if tx.closed {
+ return &batchResults{err: ErrTxClosed}
+ }
+
+ return tx.conn.SendBatch(ctx, b)
+}
+
+// LargeObjects returns a LargeObjects instance for the transaction.
+func (tx *dbTx) LargeObjects() LargeObjects {
+ return LargeObjects{tx: tx}
+}
+
+func (tx *dbTx) Conn() *Conn {
+ return tx.conn
+}
+
+// dbSimulatedNestedTx represents a simulated nested transaction implemented by a savepoint.
+type dbSimulatedNestedTx struct {
+ tx Tx
+ savepointNum int64
+ closed bool
+}
+
+// Begin starts a pseudo nested transaction implemented with a savepoint.
+func (sp *dbSimulatedNestedTx) Begin(ctx context.Context) (Tx, error) {
+ if sp.closed {
+ return nil, ErrTxClosed
+ }
+
+ return sp.tx.Begin(ctx)
+}
+
+// Commit releases the savepoint, essentially committing the pseudo nested transaction.
+func (sp *dbSimulatedNestedTx) Commit(ctx context.Context) error {
+ if sp.closed {
+ return ErrTxClosed
+ }
+
+ _, err := sp.Exec(ctx, "release savepoint sp_"+strconv.FormatInt(sp.savepointNum, 10))
+ sp.closed = true
+ return err
+}
+
+// Rollback rolls back to the savepoint, essentially rolling back the pseudo nested transaction. Rollback will return
+// ErrTxClosed if the dbSimulatedNestedTx is already closed, but is otherwise safe to call multiple times. Hence, a
+// defer sp.Rollback() is safe even if sp.Commit() will be called first in a non-error condition.
+func (sp *dbSimulatedNestedTx) Rollback(ctx context.Context) error {
+ if sp.closed {
+ return ErrTxClosed
+ }
+
+ _, err := sp.Exec(ctx, "rollback to savepoint sp_"+strconv.FormatInt(sp.savepointNum, 10))
+ sp.closed = true
+ return err
+}
+
+// Exec delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+ if sp.closed {
+ return pgconn.CommandTag{}, ErrTxClosed
+ }
+
+ return sp.tx.Exec(ctx, sql, arguments...)
+}
+
+// Prepare delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+ if sp.closed {
+ return nil, ErrTxClosed
+ }
+
+ return sp.tx.Prepare(ctx, name, sql)
+}
+
+// Query delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+ if sp.closed {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ err := ErrTxClosed
+ return &baseRows{closed: true, err: err}, err
+ }
+
+ return sp.tx.Query(ctx, sql, args...)
+}
+
+// QueryRow delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) QueryRow(ctx context.Context, sql string, args ...any) Row {
+ rows, _ := sp.Query(ctx, sql, args...)
+ return (*connRow)(rows.(*baseRows))
+}
+
+// CopyFrom delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+ if sp.closed {
+ return 0, ErrTxClosed
+ }
+
+ return sp.tx.CopyFrom(ctx, tableName, columnNames, rowSrc)
+}
+
+// SendBatch delegates to the underlying Tx
+func (sp *dbSimulatedNestedTx) SendBatch(ctx context.Context, b *Batch) BatchResults {
+ if sp.closed {
+ return &batchResults{err: ErrTxClosed}
+ }
+
+ return sp.tx.SendBatch(ctx, b)
+}
+
+func (sp *dbSimulatedNestedTx) LargeObjects() LargeObjects {
+ return LargeObjects{tx: sp}
+}
+
+func (sp *dbSimulatedNestedTx) Conn() *Conn {
+ return sp.tx.Conn()
+}
+
+// BeginFunc calls Begin on db and then calls fn. If fn does not return an error, the transaction is committed. If fn
+// returns an error, the transaction is rolled back. The context will be used when executing the transaction control
+// statements (BEGIN, ROLLBACK, and COMMIT) but does not otherwise affect the execution of fn.
+func BeginFunc(
+ ctx context.Context,
+ db interface {
+ Begin(ctx context.Context) (Tx, error)
+ },
+ fn func(Tx) error,
+) (err error) {
+ var tx Tx
+ tx, err = db.Begin(ctx)
+ if err != nil {
+ return err
+ }
+
+ return beginFuncExec(ctx, tx, fn)
+}
+
+// BeginTxFunc calls BeginTx on db and then calls fn. If fn does not return an error, the transaction is committed. If
+// fn returns an error, the transaction is rolled back. The context will be used when executing the transaction control
+// statements (BEGIN, ROLLBACK, and COMMIT) but does not otherwise affect the execution of fn.
+func BeginTxFunc(
+ ctx context.Context,
+ db interface {
+ BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error)
+ },
+ txOptions TxOptions,
+ fn func(Tx) error,
+) (err error) {
+ var tx Tx
+ tx, err = db.BeginTx(ctx, txOptions)
+ if err != nil {
+ return err
+ }
+
+ return beginFuncExec(ctx, tx, fn)
+}
+
+func beginFuncExec(ctx context.Context, tx Tx, fn func(Tx) error) (err error) {
+ defer func() {
+ rollbackErr := tx.Rollback(ctx)
+ if rollbackErr != nil && !errors.Is(rollbackErr, ErrTxClosed) {
+ err = rollbackErr
+ }
+ }()
+
+ fErr := fn(tx)
+ if fErr != nil {
+ _ = tx.Rollback(ctx) // ignore rollback error as there is already an error to return
+ return fErr
+ }
+
+ return tx.Commit(ctx)
+}
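
A usage sketch for BeginFunc (placeholder DSN and table): a nil return from fn commits; a non-nil return, or a panic unwinding through the deferred Rollback, rolls back:

    package main

    import (
        "context"
        "log"

        "github.com/jackc/pgx/v5"
    )

    func main() {
        ctx := context.Background()
        conn, err := pgx.Connect(ctx, "postgres://localhost/postgres") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close(ctx)

        err = pgx.BeginFunc(ctx, conn, func(tx pgx.Tx) error {
            // Returning a non-nil error triggers Rollback; nil triggers Commit.
            _, err := tx.Exec(ctx, "update accounts set balance = balance - 10 where id = $1", 1)
            return err
        })
        if err != nil {
            log.Fatal(err)
        }
    }
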
diff --git a/vendor/github.com/jackc/pgx/v5/values.go b/vendor/github.com/jackc/pgx/v5/values.go
new file mode 100644
index 0000000..6e2ff30
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/values.go
@@ -0,0 +1,63 @@
+package pgx
+
+import (
+ "errors"
+
+ "github.com/jackc/pgx/v5/internal/pgio"
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+// PostgreSQL format codes
+const (
+ TextFormatCode = 0
+ BinaryFormatCode = 1
+)
+
+func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) {
+ buf, err := m.Encode(0, TextFormatCode, arg, []byte{})
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+ return string(buf), nil
+}
+
+func encodeCopyValue(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := m.Encode(oid, BinaryFormatCode, arg, buf)
+ if err != nil {
+ if argBuf2, err2 := tryScanStringCopyValueThenEncode(m, buf, oid, arg); err2 == nil {
+ argBuf = argBuf2
+ } else {
+ return nil, err
+ }
+ }
+
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ return buf, nil
+}
+
+func tryScanStringCopyValueThenEncode(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
+ s, ok := arg.(string)
+ if !ok {
+ textBuf, err := m.Encode(oid, TextFormatCode, arg, nil)
+ if err != nil {
+ return nil, errors.New("not a string and cannot be encoded as text")
+ }
+ s = string(textBuf)
+ }
+
+ var v any
+ err := m.Scan(oid, TextFormatCode, []byte(s), &v)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.Encode(oid, BinaryFormatCode, v, buf)
+}
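
This text-scan fallback is what lets CopyFrom accept values that lack a direct binary encoding for the target column, e.g. a Go string destined for an int4 column: the string "42" is scanned via the text codec and then re-encoded in binary. A hedged sketch (placeholder DSN and table):

    package main

    import (
        "context"
        "log"

        "github.com/jackc/pgx/v5"
    )

    func main() {
        ctx := context.Background()
        conn, err := pgx.Connect(ctx, "postgres://localhost/postgres") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close(ctx)

        // "42" is a string headed for an int4 column; encodeCopyValue's
        // text-scan fallback converts it during the binary COPY encode.
        rows := [][]any{
            {"42", "first"},
            {int32(7), "second"}, // int32 encodes directly
        }
        n, err := conn.CopyFrom(ctx, pgx.Identifier{"widgets"}, []string{"id", "name"}, pgx.CopyFromRows(rows))
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("copied %d rows", n)
    }
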