diff --git a/CLAUDE.md b/CLAUDE.md
index a4267581..2de3285e 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -2,6 +2,12 @@
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+## Architecture Overview
+### Core Components
+1. **Backend code** (`engine/`)
+ 1.1. **Entry Points** (`cmd/`)
+2. **Frontend code** (`ui/`)
+
## Build/Test/Lint Commands
- Build all components: `cd engine && make build`
- Lint code: `cd engine && make lint`
@@ -20,4 +26,123 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
- Follow standard Go import ordering
- Group similar functions together
- Error messages should be descriptive and actionable
-- UI uses pnpm for package management
\ No newline at end of file
+- UI uses pnpm for package management
+
+## Important Backend Workflow Notes
+
+- Always run tests, linter and normalize comments BEFORE committing anything
+- Run formatting, code generation, linting and testing on completion
+- Never commit without running completion sequence
+- Run tests and linter after making significant changes to verify functionality
+- IMPORTANT: Never put into commit message any mention of Claude or Claude Code
+- IMPORTANT: Never include "Test plan" sections in PR descriptions
+- Do not add comments that describe changes, progress, or historical modifications
+- Comments should only describe the current state and purpose of the code, not its history or evolution
+- After important functionality added, update README.md accordingly
+- When merging master changes to an active branch, make sure both branches are pulled and up to date first
+- Don't leave commented out code in place
+- Avoid multi-level nesting
+- Avoid multi-level ifs, never use else if
+- Never use goto
+- Avoid else branches if possible
+- Write tests in compact form by fitting struct fields to a single line (up to 130 characters)
+- Before any significant refactoring, ensure all tests pass and consider creating a new branch
+- When refactoring, editing, or fixing failed tests:
+ - Do not redesign fundamental parts of the code architecture
+ - If unable to fix an issue with the current approach, report the problem and ask for guidance
+ - Focus on minimal changes to address the specific issue at hand
+ - Preserve the existing patterns and conventions of the codebase
+
+## Backend Code Style Guidelines
+
+### Import Organization
+- Organize imports in the following order:
+ 1. Standard library packages first (e.g., "fmt", "context")
+ 2. A blank line separator
+ 3. Third-party packages
+ 4. A blank line separator
+ 5. Project imports (e.g., "gitlab.com/postgres-ai/database-lab/v3/pkg/*")
+- Example:
+ ```go
+ import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "github.com/gorilla/mux"
+
+ "gitlab.com/postgres-ai/database-lab/v3/pkg/util/branching"
+ )
+ ```
+
+### Error Handling
+- Return errors to the caller rather than using panics
+- Use descriptive error messages that help with debugging
+- Use error wrapping: `fmt.Errorf("failed to process request: %w", err)`
+- Check errors immediately after function calls
+- Return early when possible to avoid deep nesting
+
+### Variable Naming
+- Use descriptive camelCase names for variables and functions
+- Good: `notFoundHandler`, `requestContext`, `userID`
+- Bad: `not_found_handler`, `x`, `temp1`
+- Be consistent with abbreviations (e.g., `httpClient` not `HTTPClient`)
+- Local scope variables can be short (e.g., "lmt" instead of "orderLimit")
+- Use constants for magic numbers and strings
+- Use meaningful names for constants and enums
+
+### Function Parameters
+- Group related parameters together logically
+- Use descriptive parameter names that indicate their purpose
+- Consider using parameter structs for functions with many (4+) parameters
+- If a function returns 3 or more results, consider wrapping them in a Result/Response struct
+- If a function accepts 3 or more input parameters, consider wrapping them in a Request/Input struct (but never add context to the struct)
+
+### Documentation
+- All exported functions, types, and methods must have clear godoc comments
+- Begin comments with the name of the element being documented
+- Include usage examples for complex functions
+- Document any non-obvious behavior or edge cases
+- All comments should be lowercase, except for godoc public functions and methods
+- IMPORTANT: all comments except godoc comments must be lowercase, test messages must be lowercase, log messages must be lowercase
+
+### Code Structure
+- Keep code modular with focused responsibilities
+- Limit file sizes to 300-500 lines when possible
+- Group related functionality in the same package
+- Use interfaces to define behavior and enable mocking for tests
+- Keep code minimal and avoid unnecessary complexity
+- Don't keep old functions for imaginary compatibility
+- Interfaces should be defined on the consumer side (idiomatic Go)
+- Aim to pass interfaces but return concrete types when possible
+- Consider nested functions when they simplify complex functions
+
+### Code Layout
+- Keep cyclomatic complexity under 30
+- Function size preferences:
+ - Aim for functions around 50-60 lines when possible
+ - Don't break down functions too small as it can reduce readability
+ - Maintain focus on a single responsibility per function
+- Keep lines under 130 characters when possible
+- Avoid if-else chains and nested conditionals:
+ - Never use long if-else-if chains; use switch statements instead
+ - Prefer early returns to reduce nesting depth
+ - Extract complex conditions into separate boolean functions or variables
+ - Use context structs or functional options instead of multiple boolean flags
+
+### Testing
+- Write thorough tests with descriptive names (e.g., `TestRouter_HandlesMiddlewareCorrectly`)
+- Prefer subtests or table-based tests, using Testify
+- Use table-driven tests for testing multiple cases with the same logic
+- Test both success and error scenarios
+- Mock external dependencies to ensure unit tests are isolated and fast
+- Aim for at least 80% code coverage
+- Keep tests compact but readable
+- If test has too many subtests, consider splitting it to multiple tests
+- Never disable tests without a good reason and approval
+- Important: Never update code with special conditions to just pass tests
+- Don't create new test files if one already exists matching the source file name
+- Add new tests to existing test files following the same naming and structuring conventions
+- Don't add comments before subtests, t.Run("description") already communicates what test case is doing
+- Never use godoc-style comments for test functions
diff --git a/LICENSE b/LICENSE
index cb43d4eb..b0cc8d52 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright 2023 Postgres.ai https://postgres.ai/
+ Copyright 2023-2025 Postgres AI https://postgres.ai/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/README.md b/README.md
index 9eada025..8ae9f89a 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@
-
+
@@ -92,7 +92,7 @@ Read more:
- Theoretical max of snapshots/clones: 2<sup>64</sup> ([ZFS](https://en.wikipedia.org/wiki/ZFS), default)
- Maximum size of PostgreSQL data directory: 256 quadrillion zebibytes, or 2<sup>128</sup> bytes ([ZFS](https://en.wikipedia.org/wiki/ZFS), default)
- Support & technologies
- - Supported PostgreSQL versions: 9.6–17
+ - Supported PostgreSQL versions: 9.6–18
- Thin cloning ([CoW](https://en.wikipedia.org/wiki/Copy-on-write)) technologies: [ZFS](https://en.wikipedia.org/wiki/ZFS) and [LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux))
- UI for manual tasks and API & CLI for automation
- Packaged in Docker containers for all components
diff --git a/engine/.gitlab-ci.yml b/engine/.gitlab-ci.yml
index a048e132..22c505d9 100644
--- a/engine/.gitlab-ci.yml
+++ b/engine/.gitlab-ci.yml
@@ -1,6 +1,6 @@
default:
image:
- name: golang:1.23
+ name: golang:1.24
pull_policy: if-not-present
stages:
@@ -8,7 +8,6 @@ stages:
- build-binary
- build
- integration-test
- - deploy
## Conditions.
.only_engine: &only_engine
@@ -59,7 +58,7 @@ lint:
build-binary-alpine:
<<: *only_engine
image:
- name: golang:1.23-alpine
+ name: golang:1.24-alpine
pull_policy: if-not-present
stage: build-binary
artifacts:
@@ -88,8 +87,8 @@ build-binary-client-master:
- make build-client
# Install google-cloud-sdk.
+ - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
- echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
- - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
- apt-get update && apt-get install -y google-cloud-sdk
# Authenticate.
@@ -108,8 +107,8 @@ build-binary-client:
- make build-client
# Install google-cloud-sdk.
+ - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
- echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
- - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
- apt-get update && apt-get install -y google-cloud-sdk
# Authenticate.
@@ -129,8 +128,8 @@ build-binary-client-rc:
- make build-client
# Install google-cloud-sdk.
+ - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
- echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
- - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
- apt-get update && apt-get install -y google-cloud-sdk
# Authenticate.
@@ -478,6 +477,11 @@ bash-test-17:
variables:
POSTGRES_VERSION: 17
+bash-test-18:
+ <<: *bash_test
+ variables:
+ POSTGRES_VERSION: 18
+
integration-test:
services:
- name: docker:24-dind
@@ -498,26 +502,3 @@ integration-test:
script:
- cd engine
- make test-ci-integration
-
-## Deploy
-.deploy-definition: &deploy_definition
- stage: deploy
- image:
- name: dtzar/helm-kubectl:2.14.1
- pull_policy: if-not-present
- script:
- - bash ./engine/scripts/do.sh subs_envs ./engine/deploy/swagger-ui.yaml /tmp/swagger-ui.yaml
- - kubectl apply --filename /tmp/swagger-ui.yaml -n $NAMESPACE
-
-deploy-swagger-ui-tag-release:
- <<: *only_tag_release
- <<: *deploy_definition
- environment:
- name: production
- variables:
- ENV: production
- NAMESPACE: production
- DOCKER_IMAGE_NAME: "registry.gitlab.com/postgres-ai/database-lab/dblab-swagger-ui"
- before_script:
- - export CLEAN_TAG=$(echo ${CI_COMMIT_TAG#"v"})
- - export TAG="${DOCKER_IMAGE_NAME}:${CLEAN_TAG}"
diff --git a/engine/Dockerfile.dblab-server-debug b/engine/Dockerfile.dblab-server-debug
index af6b1f17..dd8b13c7 100644
--- a/engine/Dockerfile.dblab-server-debug
+++ b/engine/Dockerfile.dblab-server-debug
@@ -1,7 +1,7 @@
# How to start a container: https://postgres.ai/docs/how-to-guides/administration/engine-manage
# Compile stage
-FROM golang:1.23 AS build-env
+FROM golang:1.24 AS build-env
# Build Delve
RUN go install github.com/go-delve/delve/cmd/dlv@latest
diff --git a/engine/Makefile b/engine/Makefile
index 84bf96de..ffb2edf0 100644
--- a/engine/Makefile
+++ b/engine/Makefile
@@ -34,7 +34,7 @@ help: ## Display the help message
all: clean build ## Build all binary components of the project
install-lint: ## Install the linter to $GOPATH/bin which is expected to be in $PATH
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.61.0
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.64.8
run-lint: ## Run linters
golangci-lint run
diff --git a/engine/configs/standard/postgres/default/18/pg_hba.conf b/engine/configs/standard/postgres/default/18/pg_hba.conf
new file mode 100644
index 00000000..cccbfad1
--- /dev/null
+++ b/engine/configs/standard/postgres/default/18/pg_hba.conf
@@ -0,0 +1,128 @@
+# PostgreSQL Client Authentication Configuration File
+# ===================================================
+#
+# Refer to the "Client Authentication" section in the PostgreSQL
+# documentation for a complete description of this file. A short
+# synopsis follows.
+#
+# ----------------------
+# Authentication Records
+# ----------------------
+#
+# This file controls: which hosts are allowed to connect, how clients
+# are authenticated, which PostgreSQL user names they can use, which
+# databases they can access. Records take one of these forms:
+#
+# local DATABASE USER METHOD [OPTIONS]
+# host DATABASE USER ADDRESS METHOD [OPTIONS]
+# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
+# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
+# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
+# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
+#
+# (The uppercase items must be replaced by actual values.)
+#
+# The first field is the connection type:
+# - "local" is a Unix-domain socket
+# - "host" is a TCP/IP socket (encrypted or not)
+# - "hostssl" is a TCP/IP socket that is SSL-encrypted
+# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
+# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
+# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
+#
+# DATABASE can be "all", "sameuser", "samerole", "replication", a
+# database name, a regular expression (if it starts with a slash (/))
+# or a comma-separated list thereof. The "all" keyword does not match
+# "replication". Access to replication must be enabled in a separate
+# record (see example below).
+#
+# USER can be "all", a user name, a group name prefixed with "+", a
+# regular expression (if it starts with a slash (/)) or a comma-separated
+# list thereof. In both the DATABASE and USER fields you can also write
+# a file name prefixed with "@" to include names from a separate file.
+#
+# ADDRESS specifies the set of hosts the record matches. It can be a
+# host name, or it is made up of an IP address and a CIDR mask that is
+# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
+# specifies the number of significant bits in the mask. A host name
+# that starts with a dot (.) matches a suffix of the actual host name.
+# Alternatively, you can write an IP address and netmask in separate
+# columns to specify the set of hosts. Instead of a CIDR-address, you
+# can write "samehost" to match any of the server's own IP addresses,
+# or "samenet" to match any address in any subnet that the server is
+# directly connected to.
+#
+# METHOD can be "trust", "reject", "scram-sha-256", "password", "gss",
+# "sspi", "ident", "peer", "pam", "ldap", "radius", "cert" or "oauth".
+# Note that "password" sends passwords in clear text; "scram-sha-256" is
+# preferred since it sends encrypted passwords.
+#
+# OPTIONS are a set of options for the authentication in the format
+# NAME=VALUE. The available options depend on the different
+# authentication methods -- refer to the "Client Authentication"
+# section in the documentation for a list of which options are
+# available for which authentication methods.
+#
+# Database and user names containing spaces, commas, quotes and other
+# special characters must be quoted. Quoting one of the keywords
+# "all", "sameuser", "samerole" or "replication" makes the name lose
+# its special character, and just match a database or username with
+# that name.
+#
+# ---------------
+# Include Records
+# ---------------
+#
+# This file allows the inclusion of external files or directories holding
+# more records, using the following keywords:
+#
+# include FILE
+# include_if_exists FILE
+# include_dir DIRECTORY
+#
+# FILE is the file name to include, and DIR is the directory name containing
+# the file(s) to include. Any file in a directory will be loaded if suffixed
+# with ".conf". The files of a directory are ordered by name.
+# include_if_exists ignores missing files. FILE and DIRECTORY can be
+# specified as a relative or an absolute path, and can be double-quoted if
+# they contain spaces.
+#
+# -------------
+# Miscellaneous
+# -------------
+#
+# This file is read on server startup and when the server receives a
+# SIGHUP signal. If you edit the file on a running system, you have to
+# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
+# or execute "SELECT pg_reload_conf()".
+#
+# ----------------------------------
+# Put your actual configuration here
+# ----------------------------------
+#
+# If you want to allow non-local connections, you need to add more
+# "host" records. In that case you will also need to make PostgreSQL
+# listen on a non-local interface via the listen_addresses
+# configuration parameter, or via the -i or -h command line switches.
+
+# CAUTION: Configuring the system for local "trust" authentication
+# allows any local user to connect as any PostgreSQL user, including
+# the database superuser. If you do not trust all your local users,
+# use another authentication method.
+
+
+# TYPE DATABASE USER ADDRESS METHOD
+
+# "local" is for Unix domain socket connections only
+local all all trust
+# IPv4 local connections:
+host all all 127.0.0.1/32 trust
+# IPv6 local connections:
+host all all ::1/128 trust
+# Allow replication connections from localhost, by a user with the
+# replication privilege.
+local replication all trust
+host replication all 127.0.0.1/32 trust
+host replication all ::1/128 trust
+
+host all all all scram-sha-256
diff --git a/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf b/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf
new file mode 100644
index 00000000..425de11c
--- /dev/null
+++ b/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf
@@ -0,0 +1,884 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+# name = value
+#
+# (The "=" is optional.) Whitespace may be used. Comments are introduced with
+# "#" anywhere on a line. The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal. If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()". Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=all". Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units: B = bytes Time units: us = microseconds
+# kB = kilobytes ms = milliseconds
+# MB = megabytes s = seconds
+# GB = gigabytes min = minutes
+# TB = terabytes h = hours
+# d = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir' # use data in another directory
+ # (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
+ # (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
+ # (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = '' # write an extra PID file
+ # (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+ # comma-separated list of addresses;
+ # defaults to 'localhost'; use '*' for all
+ # (change requires restart)
+#port = 5432 # (change requires restart)
+max_connections = 100 # (change requires restart)
+#reserved_connections = 0 # (change requires restart)
+#superuser_reserved_connections = 3 # (change requires restart)
+#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
+ # (change requires restart)
+#unix_socket_group = '' # (change requires restart)
+#unix_socket_permissions = 0777 # begin with 0 to use octal notation
+ # (change requires restart)
+#bonjour = off # advertise server via Bonjour
+ # (change requires restart)
+#bonjour_name = '' # defaults to the computer name
+ # (change requires restart)
+
+# - TCP settings -
+# see "man tcp" for details
+
+#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_count = 0 # TCP_KEEPCNT;
+ # 0 selects the system default
+#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
+ # 0 selects the system default
+
+#client_connection_check_interval = 0 # time between checks for client
+ # disconnection while running queries;
+ # 0 for never
+
+# - Authentication -
+
+#authentication_timeout = 1min # 1s-600s
+#password_encryption = scram-sha-256 # scram-sha-256 or md5
+#scram_iterations = 4096
+#md5_password_warnings = on
+#oauth_validator_libraries = '' # comma-separated list of trusted validator modules
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
+#krb_caseins_users = off
+#gss_accept_delegation = off
+
+# - SSL -
+
+#ssl = off
+#ssl_ca_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_crl_file = ''
+#ssl_crl_dir = ''
+#ssl_key_file = 'server.key'
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed TLSv1.2 ciphers
+#ssl_tls13_ciphers = '' # allowed TLSv1.3 cipher suites, blank for default
+#ssl_prefer_server_ciphers = on
+#ssl_groups = 'X25519:prime256v1'
+#ssl_min_protocol_version = 'TLSv1.2'
+#ssl_max_protocol_version = ''
+#ssl_dh_params_file = ''
+#ssl_passphrase_command = ''
+#ssl_passphrase_command_supports_reload = off
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 128MB # min 128kB
+ # (change requires restart)
+#huge_pages = try # on, off, or try
+ # (change requires restart)
+#huge_page_size = 0 # zero for system default
+ # (change requires restart)
+#temp_buffers = 8MB # min 800kB
+#max_prepared_transactions = 0 # zero disables the feature
+ # (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+#work_mem = 4MB # min 64kB
+#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
+#maintenance_work_mem = 64MB # min 64kB
+#autovacuum_work_mem = -1 # min 64kB, or -1 to use maintenance_work_mem
+#logical_decoding_work_mem = 64MB # min 64kB
+#max_stack_depth = 2MB # min 100kB
+#shared_memory_type = mmap # the default is the first option
+ # supported by the operating system:
+ # mmap
+ # sysv
+ # windows
+ # (change requires restart)
+dynamic_shared_memory_type = posix # the default is usually the first option
+ # supported by the operating system:
+ # posix
+ # sysv
+ # windows
+ # mmap
+ # (change requires restart)
+#min_dynamic_shared_memory = 0MB # (change requires restart)
+#vacuum_buffer_usage_limit = 2MB # size of vacuum and analyze buffer access strategy ring;
+ # 0 to disable vacuum buffer access strategy;
+ # range 128kB to 16GB
+
+# SLRU buffers (change requires restart)
+#commit_timestamp_buffers = 0 # memory for pg_commit_ts (0 = auto)
+#multixact_offset_buffers = 16 # memory for pg_multixact/offsets
+#multixact_member_buffers = 32 # memory for pg_multixact/members
+#notify_buffers = 16 # memory for pg_notify
+#serializable_buffers = 32 # memory for pg_serial
+#subtransaction_buffers = 0 # memory for pg_subtrans (0 = auto)
+#transaction_buffers = 0 # memory for pg_xact (0 = auto)
+
+# - Disk -
+
+#temp_file_limit = -1 # limits per-process temp file space
+ # in kilobytes, or -1 for no limit
+
+#file_copy_method = copy # copy, clone (if supported by OS)
+
+#max_notify_queue_pages = 1048576 # limits the number of SLRU pages allocated
+ # for NOTIFY / LISTEN queue
+
+# - Kernel Resources -
+
+#max_files_per_process = 1000 # min 64
+ # (change requires restart)
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms # 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
+#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 512kB # measured in pages, 0 disables
+
+# - I/O -
+
+#backend_flush_after = 0 # measured in pages, 0 disables
+#effective_io_concurrency = 16 # 1-1000; 0 disables issuing multiple simultaneous IO requests
+#maintenance_io_concurrency = 16 # 1-1000; same as effective_io_concurrency
+#io_max_combine_limit = 128kB # usually 1-128 blocks (depends on OS)
+ # (change requires restart)
+#io_combine_limit = 128kB # usually 1-128 blocks (depends on OS)
+
+#io_method = worker # worker, io_uring, sync
+ # (change requires restart)
+#io_max_concurrency = -1 # Max number of IOs that one process
+ # can execute simultaneously
+ # -1 sets based on shared_buffers
+ # (change requires restart)
+#io_workers = 3 # 1-32;
+
+# - Worker Processes -
+
+#max_worker_processes = 8 # (change requires restart)
+#max_parallel_workers_per_gather = 2 # limited by max_parallel_workers
+#max_parallel_maintenance_workers = 2 # limited by max_parallel_workers
+#max_parallel_workers = 8 # number of max_worker_processes that
+ # can be used in parallel operations
+#parallel_leader_participation = on
+
+
+#------------------------------------------------------------------------------
+# WRITE-AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica # minimal, replica, or logical
+ # (change requires restart)
+#fsync = on # flush data to disk for crash safety
+ # (turning this off can cause
+ # unrecoverable data corruption)
+#synchronous_commit = on # synchronization level;
+ # off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync # the default is the first option
+ # supported by the operating system:
+ # open_datasync
+ # fdatasync (default on Linux and FreeBSD)
+ # fsync
+ # fsync_writethrough
+ # open_sync
+#full_page_writes = on # recover from partial page writes
+#wal_log_hints = off # also do full page writes of non-critical updates
+ # (change requires restart)
+#wal_compression = off # enables compression of full-page writes;
+ # off, pglz, lz4, zstd, or on
+#wal_init_zero = on # zero-fill new WAL files
+#wal_recycle = on # recycle WAL files
+#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
+ # (change requires restart)
+#wal_writer_delay = 200ms # 1-10000 milliseconds
+#wal_writer_flush_after = 1MB # measured in pages, 0 disables
+#wal_skip_threshold = 2MB
+
+#commit_delay = 0 # range 0-100000, in microseconds
+#commit_siblings = 5 # range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min # range 30s-1d
+#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 256kB # measured in pages, 0 disables
+#checkpoint_warning = 30s # 0 disables
+max_wal_size = 1GB
+min_wal_size = 80MB
+
+# - Prefetching during recovery -
+
+#recovery_prefetch = try # prefetch pages referenced in the WAL?
+#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
+ # (change requires restart)
+
+# - Archiving -
+
+#archive_mode = off # enables archiving; off, on, or always
+ # (change requires restart)
+#archive_library = '' # library to use to archive a WAL file
+ # (empty string indicates archive_command should
+ # be used)
+#archive_command = '' # command to use to archive a WAL file
+ # placeholders: %p = path of file to archive
+ # %f = file name only
+ # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0 # force a WAL file switch after this
+ # number of seconds; 0 disables
+
+# - Archive Recovery -
+
+# These are only used in recovery mode.
+
+#restore_command = '' # command to use to restore an archived WAL file
+ # placeholders: %p = path of file to restore
+ # %f = file name only
+ # e.g. 'cp /mnt/server/archivedir/%f %p'
+#archive_cleanup_command = '' # command to execute at every restartpoint
+#recovery_end_command = '' # command to execute at completion of recovery
+
+# - Recovery Target -
+
+# Set these only when performing a targeted recovery.
+
+#recovery_target = '' # 'immediate' to end recovery as soon as a
+ # consistent state is reached
+ # (change requires restart)
+#recovery_target_name = '' # the named restore point to which recovery will proceed
+ # (change requires restart)
+#recovery_target_time = '' # the time stamp up to which recovery will proceed
+ # (change requires restart)
+#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
+ # (change requires restart)
+#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
+ # (change requires restart)
+#recovery_target_inclusive = on # Specifies whether to stop:
+ # just after the specified recovery target (on)
+ # just before the recovery target (off)
+ # (change requires restart)
+#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
+ # (change requires restart)
+#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
+ # (change requires restart)
+
+# - WAL Summarization -
+
+#summarize_wal = off # run WAL summarizer process?
+#wal_summary_keep_time = '10d' # when to remove old summary files, 0 = never
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Servers -
+
+# Set these on the primary and on any standby that will send replication data.
+
+#max_wal_senders = 10 # max number of walsender processes
+ # (change requires restart)
+#max_replication_slots = 10 # max number of replication slots
+ # (change requires restart)
+#wal_keep_size = 0 # in megabytes; 0 disables
+#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
+#idle_replication_slot_timeout = 0 # in seconds; 0 disables
+#wal_sender_timeout = 60s # in milliseconds; 0 disables
+#track_commit_timestamp = off # collect timestamp of transaction commit
+ # (change requires restart)
+
+# - Primary Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = '' # standby servers that provide sync rep
+ # method to choose sync standbys, number of sync standbys,
+ # and comma-separated list of application_name
+ # from standby(s); '*' = all
+#synchronized_standby_slots = '' # streaming replication standby server slot
+ # names that logical walsender processes will wait for
+
+# - Standby Servers -
+
+# These settings are ignored on a primary server.
+
+#primary_conninfo = '' # connection string to sending server
+#primary_slot_name = '' # replication slot on sending server
+#hot_standby = on # "off" disallows queries during recovery
+ # (change requires restart)
+#max_standby_archive_delay = 30s # max delay before canceling queries
+ # when reading WAL from archive;
+ # -1 allows indefinite delay
+#max_standby_streaming_delay = 30s # max delay before canceling queries
+ # when reading streaming WAL;
+ # -1 allows indefinite delay
+#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
+ # is not set
+#wal_receiver_status_interval = 10s # send replies at least this often
+ # 0 disables
+#hot_standby_feedback = off # send info from standby to prevent
+ # query conflicts
+#wal_receiver_timeout = 60s # time that receiver waits for
+ # communication from primary
+ # in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s # time to wait before retrying to
+ # retrieve WAL after a failed attempt
+#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
+#sync_replication_slots = off # enables slot synchronization on the physical standby from the primary
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_active_replication_origins = 10 # max number of active replication origins
+ # (change requires restart)
+#max_logical_replication_workers = 4 # taken from max_worker_processes
+ # (change requires restart)
+#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
+#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_async_append = on
+#enable_bitmapscan = on
+#enable_gathermerge = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_incremental_sort = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_memoize = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_parallel_append = on
+#enable_parallel_hash = on
+#enable_partition_pruning = on
+#enable_partitionwise_join = off
+#enable_partitionwise_aggregate = off
+#enable_presorted_aggregate = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+#enable_group_by_reordering = on
+#enable_distinct_reordering = on
+#enable_self_join_elimination = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0 # measured on an arbitrary scale
+#random_page_cost = 4.0 # same scale as above
+#cpu_tuple_cost = 0.01 # same scale as above
+#cpu_index_tuple_cost = 0.005 # same scale as above
+#cpu_operator_cost = 0.0025 # same scale as above
+#parallel_setup_cost = 1000.0 # same scale as above
+#parallel_tuple_cost = 0.1 # same scale as above
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+#jit_above_cost = 100000 # perform JIT compilation if available
+ # and query more expensive than this;
+ # -1 disables
+#jit_inline_above_cost = 500000 # inline small functions if query is
+ # more expensive than this; -1 disables
+#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
+ # query is more expensive than this;
+ # -1 disables
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5 # range 1-10
+#geqo_pool_size = 0 # selects default based on effort
+#geqo_generations = 0 # selects default based on effort
+#geqo_selection_bias = 2.0 # range 1.5-2.0
+#geqo_seed = 0.0 # range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100 # range 1-10000
+#constraint_exclusion = partition # on, off, or partition
+#cursor_tuple_fraction = 0.1 # range 0.0-1.0
+#from_collapse_limit = 8
+#jit = on # allow JIT compilation
+#join_collapse_limit = 8 # 1 disables collapsing of explicit
+ # JOIN clauses
+#plan_cache_mode = auto # auto, force_generic_plan or
+ # force_custom_plan
+#recursive_worktable_factor = 10.0 # range 0.001-1000000
+
+
+#------------------------------------------------------------------------------
+# REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr' # Valid values are combinations of
+ # stderr, csvlog, jsonlog, syslog, and
+ # eventlog, depending on platform.
+ # csvlog and jsonlog require
+ # logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off # Enable capturing of stderr, jsonlog,
+ # and csvlog into log files. Required
+ # to be on for csvlogs and jsonlogs.
+ # (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log' # directory where log files are written,
+ # can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
+ # can include strftime() escapes
+#log_file_mode = 0600 # creation mode for log files,
+ # begin with 0 to use octal notation
+#log_rotation_age = 1d # Automatic rotation of logfiles will
+ # happen after that time. 0 disables.
+#log_rotation_size = 10MB # Automatic rotation of logfiles will
+ # happen after that much log output.
+ # 0 disables.
+#log_truncate_on_rotation = off # If on, an existing log file with the
+ # same name as the new log file will be
+ # truncated rather than appended to.
+ # But such truncation only occurs on
+ # time-driven rotation, not on restarts
+ # or size-driven rotation. Default is
+ # off, meaning append to existing files
+ # in all cases.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (Windows):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic
+
+#log_min_error_statement = error # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic (effectively off)
+
+#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
+ # and their durations, > 0 logs only
+ # statements running at least this number
+ # of milliseconds
+
+#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
+ # and their durations, > 0 logs only a sample of
+ # statements running at least this number
+ # of milliseconds;
+ # sample fraction is determined by log_statement_sample_rate
+
+#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
+ # log_min_duration_sample to be logged;
+ # 1.0 logs all such statements, 0.0 never logs
+
+
+#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
+ # are logged regardless of their duration; 1.0 logs all
+ # statements from all transactions, 0.0 never logs
+
+#log_startup_progress_interval = 10s # Time between progress updates for
+ # long-running startup operations.
+ # 0 disables the feature, > 0 indicates
+ # the interval in milliseconds.
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_autovacuum_min_duration = 10min # log autovacuum activity;
+ # -1 disables, 0 logs all actions and
+ # their durations, > 0 logs only
+ # actions running at least this number
+ # of milliseconds.
+#log_checkpoints = on
+#log_connections = '' # log aspects of connection setup
+ # options include receipt, authentication, authorization,
+ # setup_durations, and all to log all of these aspects
+#log_disconnections = off
+#log_duration = off # log statement duration
+#log_error_verbosity = default # terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] ' # special values:
+ # %a = application name
+ # %u = user name
+ # %d = database name
+ # %r = remote host and port
+ # %h = remote host
+ # %L = local address
+ # %b = backend type
+ # %p = process ID
+ # %P = process ID of parallel group leader
+ # %t = timestamp without milliseconds
+ # %m = timestamp with milliseconds
+ # %n = timestamp with milliseconds (as a Unix epoch)
+ # %Q = query ID (0 if none or not computed)
+ # %i = command tag
+ # %e = SQL state
+ # %c = session ID
+ # %l = session line number
+ # %s = session start timestamp
+ # %v = virtual transaction ID
+ # %x = transaction ID (0 if none)
+ # %q = stop here in non-session
+ # processes
+ # %% = '%'
+ # e.g. '<%u%%%d> '
+#log_lock_waits = off # log lock waits >= deadlock_timeout
+#log_lock_failures = off # log lock failures
+#log_recovery_conflict_waits = off # log standby recovery conflict waits
+ # >= deadlock_timeout
+#log_parameter_max_length = -1 # when logging statements, limit logged
+ # bind-parameter values to N bytes;
+ # -1 means print in full, 0 disables
+#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
+ # bind-parameter values to N bytes;
+ # -1 means print in full, 0 disables
+#log_statement = 'none' # none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1 # log temporary files equal or larger
+ # than the specified size in kilobytes;
+ # -1 disables, 0 logs all temp files
+log_timezone = 'Etc/UTC'
+
+# - Process Title -
+
+#cluster_name = '' # added to process titles if nonempty
+ # (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# STATISTICS
+#------------------------------------------------------------------------------
+
+# - Cumulative Query and Index Statistics -
+
+#track_activities = on
+#track_activity_query_size = 1024 # (change requires restart)
+#track_counts = on
+#track_cost_delay_timing = off
+#track_io_timing = off
+#track_wal_io_timing = off
+#track_functions = none # none, pl, all
+#stats_fetch_consistency = cache # cache, none, snapshot
+
+
+# - Monitoring -
+
+#compute_query_id = auto
+#log_statement_stats = off
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+
+
+#------------------------------------------------------------------------------
+# VACUUMING
+#------------------------------------------------------------------------------
+
+# - Automatic Vacuuming -
+
+#autovacuum = on # Enable autovacuum subprocess? 'on'
+ # requires track_counts to also be on.
+autovacuum_worker_slots = 16 # autovacuum worker slots to allocate
+ # (change requires restart)
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
+#autovacuum_naptime = 1min # time between autovacuum runs
+#autovacuum_vacuum_threshold = 50 # min number of row updates before
+ # vacuum
+#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
+ # before vacuum; -1 disables insert
+ # vacuums
+#autovacuum_analyze_threshold = 50 # min number of row updates before
+ # analyze
+#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
+#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of unfrozen pages
+ # before insert vacuum
+#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
+#autovacuum_vacuum_max_threshold = 100000000 # max number of row updates
+ # before vacuum; -1 disables max
+ # threshold
+#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
+ # (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
+ # before forced vacuum
+ # (change requires restart)
+#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
+ # autovacuum, in milliseconds;
+ # -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
+ # autovacuum, -1 means use
+ # vacuum_cost_limit
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
+#vacuum_cost_page_hit = 1 # 0-10000 credits
+#vacuum_cost_page_miss = 2 # 0-10000 credits
+#vacuum_cost_page_dirty = 20 # 0-10000 credits
+#vacuum_cost_limit = 200 # 1-10000 credits
+
+# - Default Behavior -
+
+#vacuum_truncate = on # enable truncation after vacuum
+
+# - Freezing -
+
+#vacuum_freeze_table_age = 150000000
+#vacuum_freeze_min_age = 50000000
+#vacuum_failsafe_age = 1600000000
+#vacuum_multixact_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_failsafe_age = 1600000000
+#vacuum_max_eager_freeze_failure_rate = 0.03 # 0 disables eager scanning
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # log
+ # notice
+ # warning
+ # error
+#search_path = '"$user", public' # schema names
+#row_security = on
+#default_table_access_method = 'heap'
+#default_tablespace = '' # a tablespace name, '' uses the default
+#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
+#temp_tablespaces = '' # a list of tablespace names, '' uses
+ # only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0 # in milliseconds, 0 is disabled
+#transaction_timeout = 0 # in milliseconds, 0 is disabled
+#lock_timeout = 0 # in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
+#idle_session_timeout = 0 # in milliseconds, 0 is disabled
+#bytea_output = 'hex' # hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_pending_list_limit = 4MB
+#createrole_self_grant = '' # set and/or inherit
+#event_triggers = on
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+timezone = 'Etc/UTC'
+#timezone_abbreviations = 'Default' # Select the set of available time zone
+ # abbreviations. Currently, there are
+ # Default
+ # Australia (historical usage)
+ # India
+ # You can create your own file in
+ # share/timezonesets/.
+#extra_float_digits = 1 # min -15, max 3; any value >0 actually
+ # selects precise output mode
+#client_encoding = sql_ascii # actually, defaults to database
+ # encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'en_US.utf8' # locale for system error message
+ # strings
+lc_monetary = 'en_US.utf8' # locale for monetary formatting
+lc_numeric = 'en_US.utf8' # locale for number formatting
+lc_time = 'en_US.utf8' # locale for time formatting
+
+#icu_validation_level = warning # report ICU locale validation
+ # errors at the given level
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Shared Library Preloading -
+
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+#shared_preload_libraries = '' # (change requires restart)
+#jit_provider = 'llvmjit' # JIT library to use
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+#extension_control_path = '$system'
+#gin_fuzzy_search_limit = 0
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64 # min 10
+ # (change requires restart)
+#max_pred_locks_per_transaction = 64 # min 10
+ # (change requires restart)
+#max_pred_locks_per_relation = -2 # negative values mean
+ # (max_pred_locks_per_transaction
+ # / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2 # min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION AND PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding # on, off, or safe_encoding
+#escape_string_warning = on
+#lo_compat_privileges = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+#allow_alter_system = on
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off # terminate session on any error?
+#restart_after_crash = on # reinitialize after backend crash?
+#data_sync_retry = off # retry or panic on failure to fsync
+ # data?
+ # (change requires restart)
+#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf. Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...' # include files ending in '.conf' from
+ # a directory, e.g., 'conf.d'
+#include_if_exists = '...' # include file only if it exists
+#include = '...' # include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
diff --git a/engine/deploy/swagger-ui.yaml b/engine/deploy/swagger-ui.yaml
deleted file mode 100644
index 9bed354d..00000000
--- a/engine/deploy/swagger-ui.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: dblab-swagger-ui
- labels:
- app: dblab-swagger-ui
-spec:
- type: ClusterIP
- ports:
- - port: 80
- targetPort: 80
- selector:
- app: dblab-swagger-ui
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: dblab-swagger-ui
- labels:
- app: dblab-swagger-ui
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: dblab-swagger-ui
- template:
- metadata:
- labels:
- app: dblab-swagger-ui
- spec:
- containers:
- - name: dblab-swagger-ui
- image: $TAG
- imagePullPolicy: Always
diff --git a/engine/go.mod b/engine/go.mod
index ec0ddf86..d9b4c811 100644
--- a/engine/go.mod
+++ b/engine/go.mod
@@ -1,6 +1,6 @@
module gitlab.com/postgres-ai/database-lab/v3
-go 1.23
+go 1.24.7
require (
github.com/AlekSi/pointer v1.2.0
@@ -12,13 +12,13 @@ require (
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.5.0
github.com/dustin/go-humanize v1.0.1
- github.com/golang-jwt/jwt/v4 v4.5.0
+ github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/go-github/v34 v34.0.0
- github.com/google/uuid v1.3.0
+ github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/jackc/pgtype v1.14.0
- github.com/jackc/pgx/v4 v4.18.1
+ github.com/jackc/pgx/v4 v4.18.2
github.com/lib/pq v1.10.9
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pkg/errors v0.9.1
@@ -31,21 +31,21 @@ require (
github.com/testcontainers/testcontainers-go v0.12.0
github.com/urfave/cli/v2 v2.25.7
github.com/wagslane/go-password-validator v0.3.0
- golang.org/x/crypto v0.14.0
- golang.org/x/mod v0.12.0
- golang.org/x/oauth2 v0.10.0
+ golang.org/x/crypto v0.41.0
+ golang.org/x/mod v0.26.0
+ golang.org/x/oauth2 v0.30.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
+ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
- github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
- github.com/containerd/containerd v1.7.2 // indirect
+ github.com/containerd/containerd v1.7.28 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -53,25 +53,26 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
- github.com/jackc/pgconn v1.14.1 // indirect
+ github.com/jackc/pgconn v1.14.3 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
- github.com/jackc/pgproto3/v2 v2.3.2 // indirect
+ github.com/jackc/pgproto3/v2 v2.3.3 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/klauspost/compress v1.16.7 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/magiconair/properties v1.8.5 // indirect
- github.com/moby/patternmatcher v0.5.0 // indirect
- github.com/moby/sys/sequential v0.5.0 // indirect
- github.com/moby/sys/user v0.3.0 // indirect
+ github.com/moby/patternmatcher v0.6.0 // indirect
+ github.com/moby/sys/sequential v0.6.0 // indirect
+ github.com/moby/sys/user v0.4.0 // indirect
+ github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -83,17 +84,13 @@ require (
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect
go.opentelemetry.io/otel v1.30.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.18.0 // indirect
go.opentelemetry.io/otel/metric v1.30.0 // indirect
- go.opentelemetry.io/otel/sdk v1.18.0 // indirect
go.opentelemetry.io/otel/trace v1.30.0 // indirect
- golang.org/x/net v0.17.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
- golang.org/x/tools v0.11.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
- google.golang.org/protobuf v1.31.0 // indirect
+ golang.org/x/net v0.42.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/grpc v1.67.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+ gotest.tools/v3 v3.5.2 // indirect
)
diff --git a/engine/go.sum b/engine/go.sum
index 9be68150..2a847cb5 100644
--- a/engine/go.sum
+++ b/engine/go.sum
@@ -22,8 +22,8 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w=
github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -51,8 +51,8 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tT
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -60,8 +60,8 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
-github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
+github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
+github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -143,8 +143,8 @@ github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
-github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
+github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c=
+github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -207,8 +207,6 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
-github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -312,8 +310,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -340,8 +338,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -353,8 +349,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github/v34 v34.0.0 h1:/siYFImY8KwGc5QD1gaPf+f8QX6tLwxNIco2RkYxoFA=
github.com/google/go-github/v34 v34.0.0/go.mod h1:w/2qlrXUfty+lbyO6tatnzIw97v1CM+/jZcwXMDiPQQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -372,8 +368,9 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -417,9 +414,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
-github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4=
-github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
+github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
@@ -435,8 +431,8 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
-github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
+github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
@@ -450,12 +446,11 @@ github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
-github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
+github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU=
+github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -475,8 +470,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -520,17 +515,19 @@ github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
-github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
-github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
-github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
@@ -567,8 +564,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
-github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -678,8 +675,6 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -689,9 +684,6 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -748,14 +740,14 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 h1:IAtl+7gua134xcV3NieDhJHjjOVeJhXAnYf/0hswjUY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0/go.mod h1:w+pXobnBzh95MNIkeIuAKcHe/Uu/CX2PKIvBP6ipKRA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.18.0 h1:6pu8ttx76BxHf+xz/H77AUZkPF3cwWzXqAUsXhVKI18=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.18.0/go.mod h1:IOmXxPrxoxFMXdNy7lfDmE8MzE61YPcurbUm0SMjerI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
-go.opentelemetry.io/otel/sdk v1.18.0 h1:e3bAB0wB3MljH38sHzpV/qWrOTCFrdZF2ct9F8rBkcY=
-go.opentelemetry.io/otel/sdk v1.18.0/go.mod h1:1RCygWV7plY2KmdskZEDDBs4tJeHG92MdHZIluiYs/M=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
@@ -789,9 +781,8 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -823,8 +814,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -861,16 +852,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -881,8 +871,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -955,17 +943,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -975,16 +961,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1028,8 +1013,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
-golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1052,8 +1035,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1077,10 +1058,11 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1095,8 +1077,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o=
-google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1110,8 +1092,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1147,8 +1129,9 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/engine/internal/cloning/base.go b/engine/internal/cloning/base.go
index e5edb759..952a9436 100644
--- a/engine/internal/cloning/base.go
+++ b/engine/internal/cloning/base.go
@@ -387,8 +387,13 @@ func (c *Base) destroyClone(cloneID string, w *CloneWrapper) {
if w.Clone.Snapshot != nil {
c.decrementCloneNumber(w.Clone.Snapshot.ID)
}
+
c.observingCh <- cloneID
+ if err := c.provision.CleanupCloneDataset(w.Clone, w.Clone.Snapshot.Pool); err != nil {
+ log.Errf("failed to cleanup clone dataset: %v", err)
+ }
+
c.SaveClonesState()
c.webhookCh <- webhooks.CloneEvent{
@@ -430,8 +435,10 @@ func (c *Base) refreshCloneMetadata(w *CloneWrapper) {
return
}
+ c.cloneMutex.Lock()
w.Clone.Metadata.CloneDiffSize = sessionState.CloneDiffSize
w.Clone.Metadata.LogicalSize = sessionState.LogicalReferenced
+ c.cloneMutex.Unlock()
}
// UpdateClone updates clone.
@@ -522,11 +529,6 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest)
log.Warn("clone has dependent snapshots", cloneID)
c.cloneMutex.Lock()
w.Clone.Revision++
- w.Clone.HasDependent = true
- c.cloneMutex.Unlock()
- } else {
- c.cloneMutex.Lock()
- w.Clone.HasDependent = false
c.cloneMutex.Unlock()
}
@@ -625,6 +627,8 @@ func (c *Base) GetClones() []*models.Clone {
clones := make([]*models.Clone, 0, c.lenClones())
c.cloneMutex.RLock()
+ requestsByPool := make(map[string][]resources.SessionStateRequest)
+
for _, cloneWrapper := range c.clones {
if cloneWrapper.Clone.Snapshot != nil {
snapshot, err := c.getSnapshotByID(cloneWrapper.Clone.Snapshot.ID)
@@ -637,12 +641,30 @@ func (c *Base) GetClones() []*models.Clone {
}
}
- c.refreshCloneMetadata(cloneWrapper)
+ if cloneWrapper.Session != nil && cloneWrapper.Clone != nil {
+ pool := cloneWrapper.Session.Pool
+ requestsByPool[pool] = append(requestsByPool[pool], resources.SessionStateRequest{
+ CloneID: cloneWrapper.Clone.ID,
+ Branch: cloneWrapper.Clone.Branch,
+ })
+ }
clones = append(clones, cloneWrapper.Clone)
}
c.cloneMutex.RUnlock()
+ sessionStates, err := c.provision.GetBatchSessionState(requestsByPool)
+ if err != nil {
+ log.Err("failed to get batch session states: ", err)
+ }
+
+ for _, clone := range clones {
+ if state, ok := sessionStates[clone.ID]; ok {
+ clone.Metadata.CloneDiffSize = state.CloneDiffSize
+ clone.Metadata.LogicalSize = state.LogicalReferenced
+ }
+ }
+
sort.Slice(clones, func(i, j int) bool {
return clones[i].CreatedAt.After(clones[j].CreatedAt.Time)
})
diff --git a/engine/internal/provision/mode_local.go b/engine/internal/provision/mode_local.go
index 7bc89cab..e457e859 100644
--- a/engine/internal/provision/mode_local.go
+++ b/engine/internal/provision/mode_local.go
@@ -274,10 +274,8 @@ func (p *Provisioner) ResetSession(session *resources.Session, clone *models.Clo
return nil, errors.Wrap(err, "failed to stop container")
}
- if clone.Revision == branching.DefaultRevision || !clone.HasDependent {
- if err = fsm.DestroyClone(clone.Branch, name, clone.Revision); err != nil {
- return nil, errors.Wrap(err, "failed to destroy clone")
- }
+ if err = fsm.DestroyClone(clone.Branch, name, clone.Revision); err != nil {
+ return nil, errors.Wrap(err, "failed to destroy clone")
}
if err = newFSManager.CreateClone(clone.Branch, name, snapshot.ID, clone.Revision); err != nil {
@@ -300,9 +298,14 @@ func (p *Provisioner) ResetSession(session *resources.Session, clone *models.Clo
}
snapshotModel := &models.Snapshot{
- ID: snapshot.ID,
- CreatedAt: models.NewLocalTime(snapshot.CreatedAt),
- DataStateAt: models.NewLocalTime(snapshot.DataStateAt),
+ ID: snapshot.ID,
+ CreatedAt: models.NewLocalTime(snapshot.CreatedAt),
+ DataStateAt: models.NewLocalTime(snapshot.DataStateAt),
+ PhysicalSize: snapshot.Used,
+ LogicalSize: snapshot.LogicalReferenced,
+ Pool: snapshot.Pool,
+ Branch: snapshot.Branch,
+ Message: snapshot.Message,
}
return snapshotModel, nil
@@ -335,6 +338,31 @@ func (p *Provisioner) GetSessionState(s *resources.Session, branch, cloneID stri
return fsm.GetSessionState(branch, cloneID)
}
+// GetBatchSessionState retrieves session states for multiple clones efficiently.
+// Per-pool failures (missing FS manager, batch query error) are logged and the
+// pool is skipped, so the result may be partial; the returned error is
+// currently always nil.
+func (p *Provisioner) GetBatchSessionState(batch map[string][]resources.SessionStateRequest) (map[string]resources.SessionState, error) {
+ batchResults := make(map[string]resources.SessionState)
+
+ for poolName, reqs := range batch {
+ fsm, err := p.pm.GetFSManager(poolName)
+ if err != nil {
+ log.Err(fmt.Sprintf("failed to find filesystem manager for pool %s: %v", poolName, err))
+ continue
+ }
+
+ results, err := fsm.GetBatchSessionState(reqs)
+ if err != nil {
+ log.Err(fmt.Sprintf("failed to get batch session state for pool %s: %v", poolName, err))
+ continue
+ }
+
+ // Merge per-pool results; keys are clone IDs and are assumed unique across pools.
+ for cloneID, state := range results {
+ batchResults[cloneID] = state
+ }
+ }
+
+ return batchResults, nil
+}
+
// GetPoolEntryList provides an ordered list of available pools.
func (p *Provisioner) GetPoolEntryList() []models.PoolEntry {
fsmList := p.pm.GetFSManagerOrderedList()
@@ -571,6 +599,46 @@ func (p *Provisioner) StopAllSessions(exceptClones map[string]struct{}) error {
return nil
}
+// reviewDown returns the ID of a snapshot in the repo whose name starts with
+// cloneDataset, or "" if there is none. Map iteration order is unspecified,
+// so which matching snapshot is returned is arbitrary; callers only test for
+// non-empty.
+func reviewDown(repo *models.Repo, cloneDataset string) string {
+ for snapshotID := range repo.Snapshots {
+ if strings.HasPrefix(snapshotID, cloneDataset) {
+ return snapshotID
+ }
+ }
+
+ return ""
+}
+
+// CleanupCloneDataset removes a clone dataset unless a snapshot (commit)
+// originates from it. The FS manager is resolved from the clone snapshot's
+// pool; the pool argument is only used to build the clone dataset name.
+func (p *Provisioner) CleanupCloneDataset(clone *models.Clone, pool string) error {
+ if clone.Snapshot == nil {
+ return fmt.Errorf("clone has no snapshot, so the pool cannot be determined. Skip cleanup")
+ }
+
+ fsm, err := p.pm.GetFSManager(clone.Snapshot.Pool)
+ if err != nil {
+ // Report the pool that was actually looked up (the snapshot's pool),
+ // not the pool argument, which may differ.
+ return fmt.Errorf("cannot work with pool %s: %w", clone.Snapshot.Pool, err)
+ }
+
+ repo, err := fsm.GetRepo()
+ if err != nil {
+ return fmt.Errorf("failed to get snapshots: %w", err)
+ }
+
+ // NOTE(review): the dataset name is built from the pool argument while the
+ // FS manager comes from clone.Snapshot.Pool — confirm both always refer to
+ // the same pool, otherwise the commit check inspects the wrong dataset.
+ snapshotDep := reviewDown(repo, branching.CloneName(pool, clone.Branch, clone.ID, clone.Revision))
+ if snapshotDep != "" {
+ log.Dbg(fmt.Sprintf("Dataset has commit: %s. Skip destroying", snapshotDep))
+
+ return nil
+ }
+
+ if err = fsm.DestroyClone(clone.Branch, clone.ID, clone.Revision); err != nil {
+ return fmt.Errorf("failed to destroy clone: %w", err)
+ }
+
+ return nil
+}
+
func (p *Provisioner) stopPoolSessions(fsm pool.FSManager, exceptClones map[string]struct{}) error {
fsPool := fsm.Pool()
diff --git a/engine/internal/provision/mode_local_test.go b/engine/internal/provision/mode_local_test.go
index 72c70e13..9183ec2a 100644
--- a/engine/internal/provision/mode_local_test.go
+++ b/engine/internal/provision/mode_local_test.go
@@ -102,6 +102,10 @@ func (m mockFSManager) GetSessionState(_, _ string) (*resources.SessionState, er
return nil, nil
}
+// GetBatchSessionState is a test stub; always returns an empty result set.
+func (m mockFSManager) GetBatchSessionState(_ []resources.SessionStateRequest) (map[string]resources.SessionState, error) {
+ return make(map[string]resources.SessionState), nil
+}
+
func (m mockFSManager) GetFilesystemState() (models.FileSystem, error) {
return models.FileSystem{Mode: "zfs"}, nil
}
@@ -214,6 +218,14 @@ func (m mockFSManager) KeepRelation(_ string) error {
return nil
}
+// GetDatasetOrigins is a test stub; always returns nil.
+func (m mockFSManager) GetDatasetOrigins(_ string) []string {
+ return nil
+}
+
+// GetActiveDatasets is a test stub; always returns nil.
+func (m mockFSManager) GetActiveDatasets(_ string) ([]string, error) {
+ return nil, nil
+}
+
func TestBuildPoolEntry(t *testing.T) {
testCases := []struct {
pool *resources.Pool
diff --git a/engine/internal/provision/pool/manager.go b/engine/internal/provision/pool/manager.go
index 1c63a6a2..d07fa242 100644
--- a/engine/internal/provision/pool/manager.go
+++ b/engine/internal/provision/pool/manager.go
@@ -39,6 +39,7 @@ type Cloner interface {
// StateReporter describes methods of state reporting.
type StateReporter interface {
GetSessionState(branch, name string) (*resources.SessionState, error)
+ GetBatchSessionState(requests []resources.SessionStateRequest) (map[string]resources.SessionState, error)
GetFilesystemState() (models.FileSystem, error)
}
@@ -57,7 +58,7 @@ type Branching interface {
VerifyBranchMetadata() error
CreateDataset(datasetName string) error
CreateBranch(branchName, snapshotID string) error
- DestroyDataset(branchName string) (err error)
+ DestroyDataset(dataset string) (err error)
ListBranches() (map[string]string, error)
ListAllBranches(filterPools []string) ([]models.BranchEntity, error)
GetRepo() (*models.Repo, error)
@@ -78,6 +79,8 @@ type Branching interface {
Reset(snapshotID string, options thinclones.ResetOptions) error
HasDependentEntity(snapshotName string) ([]string, error)
KeepRelation(snapshotName string) error
+ GetDatasetOrigins(snapshotName string) []string
+ GetActiveDatasets(dataset string) ([]string, error)
}
// Pooler describes methods for Pool providing.
diff --git a/engine/internal/provision/resources/resources.go b/engine/internal/provision/resources/resources.go
index 1a5538ee..5892bb76 100644
--- a/engine/internal/provision/resources/resources.go
+++ b/engine/internal/provision/resources/resources.go
@@ -48,3 +48,9 @@ type SessionState struct {
CloneDiffSize uint64
LogicalReferenced uint64
}
+
+// SessionStateRequest defines a request for batch session state retrieval.
+type SessionStateRequest struct {
+ CloneID string
+ Branch string
+}
diff --git a/engine/internal/provision/thinclones/lvm/lvmanager.go b/engine/internal/provision/thinclones/lvm/lvmanager.go
index 8afc4c74..c7fc0d59 100644
--- a/engine/internal/provision/thinclones/lvm/lvmanager.go
+++ b/engine/internal/provision/thinclones/lvm/lvmanager.go
@@ -136,6 +136,11 @@ func (m *LVManager) GetSessionState(_, _ string) (*resources.SessionState, error
return &resources.SessionState{}, nil
}
+// GetBatchSessionState is not implemented for LVM; returns an empty map so
+// callers simply find no states.
+func (m *LVManager) GetBatchSessionState(_ []resources.SessionStateRequest) (map[string]resources.SessionState, error) {
+ return make(map[string]resources.SessionState), nil
+}
+
// GetFilesystemState is not implemented.
func (m *LVManager) GetFilesystemState() (models.FileSystem, error) {
// TODO(anatoly): Implement.
@@ -316,3 +321,17 @@ func (m *LVManager) KeepRelation(_ string) error {
return nil
}
+
+// GetDatasetOrigins is not supported for LVM; always returns nil.
+func (m *LVManager) GetDatasetOrigins(_ string) []string {
+ log.Msg("GetDatasetOrigins is not supported for LVM. Skip the operation")
+
+ return nil
+}
+
+// GetActiveDatasets is not supported for LVM; always returns nil.
+func (m *LVManager) GetActiveDatasets(_ string) ([]string, error) {
+ // Fixed copy-paste: the message previously referred to GetDatasetOrigins.
+ log.Msg("GetActiveDatasets is not supported for LVM. Skip the operation")
+
+ return nil, nil
+}
diff --git a/engine/internal/provision/thinclones/zfs/branching.go b/engine/internal/provision/thinclones/zfs/branching.go
index f446edc9..f2ed6666 100644
--- a/engine/internal/provision/thinclones/zfs/branching.go
+++ b/engine/internal/provision/thinclones/zfs/branching.go
@@ -126,32 +126,33 @@ func (m *Manager) VerifyBranchMetadata() error {
return nil
}
- latest := snapshots[0]
+ branchHeads := make(map[string]string)
- brName, err := m.getProperty(branchProp, latest.ID)
- if err != nil {
- log.Dbg("cannot find branch for snapshot", latest.ID, err.Error())
- }
+ for i := numberSnapshots; i > 0; i-- {
+ sn := snapshots[i-1]
+ log.Dbg(sn)
- for i := numberSnapshots; i > 1; i-- {
- if err := m.SetRelation(snapshots[i-1].ID, snapshots[i-2].ID); err != nil {
- return fmt.Errorf("failed to set snapshot relations: %w", err)
+ if err := m.DeleteBranchProp(sn.Branch, sn.ID); err != nil {
+ return fmt.Errorf("failed to clean branch property: %w", err)
}
- if brName == "" {
- brName, err = m.getProperty(branchProp, snapshots[i-1].ID)
- if err != nil {
- log.Dbg("cannot find branch for snapshot", snapshots[i-1].ID, err.Error())
- }
+ head, ok := branchHeads[sn.Branch]
+ if !ok {
+ branchHeads[sn.Branch] = sn.ID
+ continue
+ }
+
+ if err := m.SetRelation(head, sn.ID); err != nil {
+ return fmt.Errorf("failed to set snapshot relations: %w", err)
}
- }
- if brName == "" {
- brName = branching.DefaultBranch
+ branchHeads[sn.Branch] = sn.ID
}
- if err := m.AddBranchProp(brName, latest.ID); err != nil {
- return fmt.Errorf("failed to add branch property: %w", err)
+ for brName, latestID := range branchHeads {
+ if err := m.AddBranchProp(brName, latestID); err != nil {
+ return fmt.Errorf("failed to add branch property: %w", err)
+ }
}
log.Msg("data branching has been verified")
@@ -266,13 +267,15 @@ func (m *Manager) ListAllBranches(poolList []string) ([]models.BranchEntity, err
continue
}
+ dataset := branching.ParseBaseDatasetFromSnapshot(fields[1])
+
if !strings.Contains(fields[0], branchSep) {
- branches = append(branches, models.BranchEntity{Name: fields[0], SnapshotID: fields[1]})
+ branches = append(branches, models.BranchEntity{Name: fields[0], Dataset: dataset, SnapshotID: fields[1]})
continue
}
for _, branchName := range strings.Split(fields[0], branchSep) {
- branches = append(branches, models.BranchEntity{Name: branchName, SnapshotID: fields[1]})
+ branches = append(branches, models.BranchEntity{Name: branchName, Dataset: dataset, SnapshotID: fields[1]})
}
}
@@ -538,22 +541,6 @@ func (m *Manager) HasDependentEntity(snapshotName string) ([]string, error) {
dependentClones := strings.Split(clones, ",")
- // Check clones of dependent snapshots.
- if child != "" {
- // check all child snapshots
- childList := strings.Split(child, ",")
-
- for _, childSnapshot := range childList {
- // TODO: limit the max level of recursion.
- childClones, err := m.HasDependentEntity(childSnapshot)
- if err != nil {
- return nil, fmt.Errorf("failed to check dependent clones of dependent snapshots: %w", err)
- }
-
- dependentClones = append(dependentClones, childClones...)
- }
- }
-
return dependentClones, nil
}
diff --git a/engine/internal/provision/thinclones/zfs/zfs.go b/engine/internal/provision/thinclones/zfs/zfs.go
index c753b1cf..bc55fb68 100644
--- a/engine/internal/provision/thinclones/zfs/zfs.go
+++ b/engine/internal/provision/thinclones/zfs/zfs.go
@@ -9,6 +9,7 @@ import (
"encoding/base64"
"fmt"
"path"
+ "sort"
"strconv"
"strings"
"sync"
@@ -235,13 +236,27 @@ func (m *Manager) DestroyClone(branchName, cloneName string, revision int) error
return nil
}
+ cloneDataset := m.config.Pool.CloneDataset(branchName, cloneName)
+ cloneOrigins := m.GetDatasetOrigins(cloneDataset)
+
+ if m.hasDependentSnapshots(cloneOrigins, cloneMountName) {
+ log.Msg(fmt.Sprintf("clone %q has dependent snapshot; skipping", cloneMountName))
+ return nil
+ }
+
+ // TODO: check pre-clone for physical mode.
+ if len(cloneOrigins) <= branching.MinDatasetNumber {
+ // There are no other revisions, so we can destroy the entire clone dataset.
+ cloneMountName = cloneDataset
+ }
+
// Delete the clone and all snapshots and clones depending on it.
// TODO(anatoly): right now, we are using this function only for
// deleting thin clones created by users. If we are going to use
// this function to delete clones used during the preparation
// of baseline snapshots, we need to omit `-R`, to avoid
// unexpected deletion of users' clones.
- cmd := fmt.Sprintf("zfs destroy %s", cloneMountName)
+ cmd := fmt.Sprintf("zfs destroy -r %s", cloneMountName)
if _, err = m.runner.Run(cmd); err != nil {
if strings.Contains(cloneName, "clone_pre") {
@@ -254,6 +269,59 @@ func (m *Manager) DestroyClone(branchName, cloneName string, revision int) error
return nil
}
+// GetDatasetOrigins returns the raw `origin` property values of the clone
+// dataset and all of its descendants ("zfs list -H -o origin -r").
+// On command failure it logs a warning and returns nil.
+func (m *Manager) GetDatasetOrigins(cloneDataset string) []string {
+ listZfsClonesCmd := "zfs list -H -o origin -r " + cloneDataset
+
+ out, err := m.runner.Run(listZfsClonesCmd, false)
+ if err != nil {
+ log.Warn(fmt.Sprintf("failed to check clone dataset %s: %v", cloneDataset, err))
+ return nil
+ }
+
+ lines := strings.Split(strings.TrimSpace(out), "\n")
+
+ return lines
+}
+
+// GetActiveDatasets lists snapshot names in the pool that match the given
+// clone dataset (filtered via grep). A command failure is treated as "no
+// active datasets": it is logged at debug level and processing continues
+// with the (possibly empty) output, so the returned error is currently
+// always nil.
+func (m *Manager) GetActiveDatasets(cloneDataset string) ([]string, error) {
+ listZfsClonesCmd := fmt.Sprintf("zfs list -t snapshot -H -o name -r %s | grep %s", m.config.Pool.Name, cloneDataset)
+
+ out, err := m.runner.Run(listZfsClonesCmd, false)
+ if err != nil {
+ // Best effort: grep exits non-zero when nothing matches.
+ log.Dbg(fmt.Sprintf("no active datasets %s: %v", cloneDataset, err))
+ }
+
+ lines := strings.Split(strings.TrimSpace(out), "\n")
+
+ datasetRegistry := make([]string, 0, len(lines))
+
+ for _, line := range lines {
+ name := strings.TrimSpace(line)
+ if name == "" || name == empty {
+ continue
+ }
+
+ datasetRegistry = append(datasetRegistry, name)
+ }
+
+ return datasetRegistry, nil
+}
+
+// hasDependentSnapshots reports whether any of the given origin values refers
+// to a snapshot under cloneMountName (prefix match). Placeholder entries
+// (the ZFS "empty" value) are ignored.
+func (m *Manager) hasDependentSnapshots(origins []string, cloneMountName string) bool {
+ for _, name := range origins {
+ if name == empty {
+ continue
+ }
+
+ if strings.HasPrefix(name, cloneMountName) {
+ log.Dbg(fmt.Sprintf("%s has dependent snapshot %s", cloneMountName, name))
+ return true
+ }
+ }
+
+ return false
+}
+
// cloneExists checks whether a ZFS clone exists.
func (m *Manager) cloneExists(name string) (bool, error) {
listZfsClonesCmd := "zfs list -r " + m.config.Pool.Name
@@ -373,11 +441,17 @@ func (m *Manager) CreateSnapshot(poolSuffix, dataStateAt string) (string, error)
return "", fmt.Errorf("failed to parse dataStateAt: %w", err)
}
+ branch := branching.ParseBranchNameFromSnapshot(snapshotName, poolName)
+ if branch == "" {
+ branch = branching.DefaultBranch
+ }
+
newSnapshot := resources.Snapshot{
ID: snapshotName,
CreatedAt: time.Now(),
DataStateAt: dataStateTime,
Pool: m.config.Pool.Name,
+ Branch: branch,
}
if !strings.HasSuffix(snapshotName, m.config.PreSnapshotSuffix) {
@@ -526,13 +600,128 @@ func (m *Manager) CleanupSnapshots(retentionLimit int) ([]string, error) {
return nil, errors.Wrap(err, "failed to clean up snapshots")
}
+ if err := m.cleanupEmptyDatasets(clonesOutput); err != nil {
+ return nil, fmt.Errorf("failed to clean up empty datasets: %w", err)
+ }
+
lines := strings.Split(out, "\n")
m.RefreshSnapshotList()
+ firstSnapshotID := ""
+
+ m.mu.Lock()
+ if l := len(m.snapshots); l > 0 {
+ firstSnapshotID = m.snapshots[l-1].ID
+ }
+ m.mu.Unlock()
+
+ m.reviewParentProperty(firstSnapshotID)
+
return lines, nil
}
+// reviewParentProperty clears the parent property of the given snapshot when
+// the recorded parent snapshot no longer exists (e.g. it was removed by
+// cleanup). A blank snapshot ID or blank parent property is a no-op; all
+// failures are logged rather than propagated.
+func (m *Manager) reviewParentProperty(snapshotID string) {
+ if snapshotID == "" {
+ return
+ }
+
+ parent, err := m.getProperty(parentProp, snapshotID)
+ if err != nil {
+ log.Err("failed to review parent property:", err)
+
+ return
+ }
+
+ if parent == "" {
+ return
+ }
+
+ _, err = m.GetSnapshotProperties(parent)
+ if err != nil {
+ // Parent snapshot not found, clean up the property.
+ if err = m.setParent("", snapshotID); err != nil {
+ log.Err(err)
+ }
+ }
+}
+
+// cleanupEmptyDatasets destroys clone datasets that have no origin snapshot
+// and no child datasets, as selected by getEmptyDatasets from the
+// "zfs list" clones output. Stops at the first destroy failure.
+func (m *Manager) cleanupEmptyDatasets(clonesOutput string) error {
+ datasetsToRemove := m.getEmptyDatasets(clonesOutput)
+
+ for _, dataset := range datasetsToRemove {
+ log.Dbg("Remove empty dataset: ", dataset)
+
+ if err := m.DestroyDataset(dataset); err != nil {
+ return fmt.Errorf("failed to destroy dataset %s: %w", dataset, err)
+ }
+ }
+
+ return nil
+}
+
+// getEmptyDatasets parses "zfs list" output ("<dataset> <origin>" pairs) and
+// returns clone datasets that have no origin ("-") and no child datasets,
+// ordered deepest-first so they can be destroyed without dependency conflicts.
+func (m *Manager) getEmptyDatasets(clonesOutput string) []string {
+ const outputParts = 2
+
+ lines := strings.Split(strings.TrimSpace(clonesOutput), "\n")
+
+ allDatasets := make(map[string]struct{})
+ emptyDatasets := []string{}
+
+ for _, line := range lines {
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ if len(parts) != outputParts {
+ continue
+ }
+
+ dataset := parts[0]
+ origin := parts[1]
+
+ // Skip branch datasets (only process clones):
+ // <pool>/branch/<branch>/<clone>/r<revision>
+ pathParts := strings.Split(dataset, "/")
+ if len(pathParts) <= 3 || pathParts[1] != branching.BranchDir {
+ continue
+ }
+
+ allDatasets[dataset] = struct{}{}
+
+ if origin == empty {
+ emptyDatasets = append(emptyDatasets, dataset)
+ }
+ }
+
+ // Find empty datasets without children
+ datasetsToRemove := []string{}
+
+ for _, dataset := range emptyDatasets {
+ hasChild := false
+ prefix := dataset + "/"
+
+ for other := range allDatasets {
+ if strings.HasPrefix(other, prefix) {
+ hasChild = true
+ break
+ }
+ }
+
+ if !hasChild {
+ datasetsToRemove = append(datasetsToRemove, dataset)
+ }
+ }
+
+ // Sort by depth (the deepest first) to avoid conflicts
+ sort.Slice(datasetsToRemove, func(i, j int) bool {
+ return strings.Count(datasetsToRemove[i], "/") > strings.Count(datasetsToRemove[j], "/")
+ })
+
+ return datasetsToRemove
+}
+
func (m *Manager) getBusySnapshotList(clonesOutput string) []string {
systemClones := make(map[string]string)
branchingSnapshotDatasets := []string{}
@@ -594,7 +783,7 @@ func (m *Manager) GetSessionState(branch, name string) (*resources.SessionState,
var sEntry *ListEntry
- entryName := path.Join(m.config.Pool.Name, "branch", branch, name)
+ entryName := branching.CloneDataset(m.config.Pool.Name, branch, name)
for _, entry := range entries {
if entry.Name == entryName {
@@ -615,6 +804,34 @@ func (m *Manager) GetSessionState(branch, name string) (*resources.SessionState,
return state, nil
}
+// GetBatchSessionState returns session states for multiple clones in a single ZFS query.
+// Clones whose dataset is absent from the filesystem listing are silently
+// omitted from the result rather than reported as errors.
+func (m *Manager) GetBatchSessionState(requests []resources.SessionStateRequest) (map[string]resources.SessionState, error) {
+ entries, err := m.listFilesystems(m.config.Pool.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list filesystems: %w", err)
+ }
+
+ // Index the listing by dataset name for O(1) lookups per request.
+ entryMap := make(map[string]*ListEntry, len(entries))
+ for _, entry := range entries {
+ entryMap[entry.Name] = entry
+ }
+
+ sessionStates := make(map[string]resources.SessionState, len(requests))
+
+ for _, req := range requests {
+ entryName := branching.CloneDataset(m.config.Pool.Name, req.Branch, req.CloneID)
+
+ if entry, ok := entryMap[entryName]; ok {
+ sessionStates[req.CloneID] = resources.SessionState{
+ CloneDiffSize: entry.Used,
+ LogicalReferenced: entry.LogicalReferenced,
+ }
+ }
+ }
+
+ return sessionStates, nil
+}
+
// GetFilesystemState returns a disk state.
func (m *Manager) GetFilesystemState() (models.FileSystem, error) {
parts := strings.SplitN(m.config.Pool.Name, "/", 2)
diff --git a/engine/internal/provision/thinclones/zfs/zfs_test.go b/engine/internal/provision/thinclones/zfs/zfs_test.go
index 0001c8a6..040eb73a 100644
--- a/engine/internal/provision/thinclones/zfs/zfs_test.go
+++ b/engine/internal/provision/thinclones/zfs/zfs_test.go
@@ -2,6 +2,7 @@ package zfs
import (
"errors"
+ "sort"
"testing"
"github.com/stretchr/testify/assert"
@@ -234,3 +235,120 @@ func TestSnapshotList(t *testing.T) {
require.Equal(t, []resources.Snapshot{{ID: "test3"}, {ID: "test1"}}, fsManager.SnapshotList())
})
}
+
+// TestCleanupEmptyDatasets exercises getEmptyDatasets: only clone datasets
+// under <pool>/branch/ with origin "-" and no child datasets are selected
+// for removal; malformed and non-branch lines are ignored.
+func TestCleanupEmptyDatasets(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expectedDestroyed []string
+ }{
+ {
+ name: "datasets with children should not be removed",
+ input: `test_pool -
+test_pool/branch -
+test_pool/branch/main -
+test_pool/branch/main/clone001 -
+test_pool/branch/main/clone001/r0 test_pool@snapshot001`,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "empty branch datasets without children should not be removed",
+ input: `test_pool -
+test_pool/branch -
+test_pool/branch/main -
+test_pool/branch/branch1 -
+test_pool/branch/branch2 -`,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "mixed case - some with children, some without",
+ input: `test_pool -
+test_pool/branch -
+test_pool/branch/main -
+test_pool/branch/main/clone001 -
+test_pool/branch/main/clone001/r0 test_pool@snapshot001
+test_pool/branch/main/clone002 -
+test_pool/branch/main/clone002/r0 test_pool@snapshot002
+test_pool/branch/branch -
+test_pool/temporary -`,
+ expectedDestroyed: []string{},
+ },
+
+ {
+ name: "empty input",
+ input: ``,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "only whitespace",
+ input: ` `,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "malformed lines should be skipped",
+ input: `test_pool -
+test_pool/branch
+invalid line without tabs
+test_pool/orphaned -
+ -
+test_pool/valid test_pool@snap1`,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "original example",
+ input: `test_dblab_pool -
+test_dblab_pool/branch -
+test_dblab_pool/branch/main -
+test_dblab_pool/branch/main/clone_pre_20250923095219 -
+test_dblab_pool/branch/main/clone_pre_20250923095219/r0 test_dblab_pool@snapshot_20250923095219_pre
+test_dblab_pool/branch/main/clone_pre_20250923095500 -
+test_dblab_pool/branch/main/clone_pre_20250923095500/r0 test_dblab_pool@snapshot_20250923095500_pre
+test_dblab_pool/branch/main/clone_pre_20250923100000 -
+test_dblab_pool/branch/main/clone_pre_20250923100000/r0 test_dblab_pool@snapshot_20250923100000_pre`,
+ expectedDestroyed: []string{},
+ },
+ {
+ name: "should skip branch datasets and only process clones",
+ input: `test_pool -
+test_pool/branch -
+test_pool/branch/main -
+test_pool/branch/main/clone001 -
+test_pool/branch/main/clone002 -
+test_pool/branch/feature -
+test_pool/branch/feature/orphaned_clone -
+test_pool/other -
+test_pool/other/dataset -`,
+ expectedDestroyed: []string{
+ "test_pool/branch/main/clone001",
+ "test_pool/branch/main/clone002",
+ "test_pool/branch/feature/orphaned_clone",
+ // Note: test_pool/other/dataset is NOT removed (not under /branch/)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fsManager := NewFSManager(runnerMock{}, Config{Pool: &resources.Pool{Name: "testPool"}})
+
+ destroyedDatasets := fsManager.getEmptyDatasets(tt.input)
+
+ sort.Strings(destroyedDatasets)
+ sort.Strings(tt.expectedDestroyed)
+
+ if len(destroyedDatasets) != len(tt.expectedDestroyed) {
+ t.Errorf("destroyed count mismatch: got %d, want %d\nDestroyed: %v\nExpected: %v",
+ len(destroyedDatasets), len(tt.expectedDestroyed),
+ destroyedDatasets, tt.expectedDestroyed)
+ return
+ }
+
+ for i := range destroyedDatasets {
+ if destroyedDatasets[i] != tt.expectedDestroyed[i] {
+ t.Errorf("destroyed dataset mismatch at index %d: got %s, want %s",
+ i, destroyedDatasets[i], tt.expectedDestroyed[i])
+ }
+ }
+ })
+ }
+}
diff --git a/engine/internal/retrieval/engine/postgres/logical/logical.go b/engine/internal/retrieval/engine/postgres/logical/logical.go
index bd985a87..0d3ea39b 100644
--- a/engine/internal/retrieval/engine/postgres/logical/logical.go
+++ b/engine/internal/retrieval/engine/postgres/logical/logical.go
@@ -28,7 +28,7 @@ func isAlreadyMounted(mounts []mount.Mount, dir string) bool {
dir = strings.Trim(dir, "/")
for _, mountPoint := range mounts {
- if strings.Trim(mountPoint.Source, "/") == dir {
+ if strings.Trim(mountPoint.Source, "/") == dir || strings.Trim(mountPoint.Target, "/") == dir {
return true
}
}
diff --git a/engine/internal/retrieval/engine/postgres/logical/logical_test.go b/engine/internal/retrieval/engine/postgres/logical/logical_test.go
index 0c61cd52..ead6521d 100644
--- a/engine/internal/retrieval/engine/postgres/logical/logical_test.go
+++ b/engine/internal/retrieval/engine/postgres/logical/logical_test.go
@@ -38,6 +38,16 @@ func TestIsAlreadyMounted(t *testing.T) {
dumpLocation: "/var/lib/dblab/new_pool/dump",
expectedResult: false,
},
+ {
+ source: []mount.Mount{{Source: "/host/path/dump", Target: "/var/lib/dblab/pool/dump"}},
+ dumpLocation: "/var/lib/dblab/pool/dump",
+ expectedResult: true,
+ },
+ {
+ source: []mount.Mount{{Source: "/host/path/dump", Target: "/var/lib/dblab/pool/dump/"}},
+ dumpLocation: "/var/lib/dblab/pool/dump",
+ expectedResult: true,
+ },
}
for _, tc := range testCases {
diff --git a/engine/internal/retrieval/engine/postgres/snapshot/physical.go b/engine/internal/retrieval/engine/postgres/snapshot/physical.go
index f49b9d8d..f4177db6 100644
--- a/engine/internal/retrieval/engine/postgres/snapshot/physical.go
+++ b/engine/internal/retrieval/engine/postgres/snapshot/physical.go
@@ -367,7 +367,8 @@ func (p *PhysicalInitial) run(ctx context.Context) (err error) {
defer func() {
if err != nil {
- if errDestroy := p.cloneManager.DestroyClone(branching.DefaultBranch, cloneName, branching.DefaultRevision); errDestroy != nil {
+ cloneDataset := branching.CloneDataset(p.fsPool.Name, branching.DefaultBranch, cloneName)
+ if errDestroy := p.cloneManager.DestroyDataset(cloneDataset); errDestroy != nil {
log.Err(fmt.Sprintf("failed to destroy clone %q: %v", cloneName, errDestroy))
}
}
@@ -402,6 +403,10 @@ func (p *PhysicalInitial) run(ctx context.Context) (err error) {
p.tm.SendEvent(ctx, telemetry.SnapshotCreatedEvent, telemetry.SnapshotCreated{})
+ if err := p.cloneManager.VerifyBranchMetadata(); err != nil {
+ log.Warn("cannot verify branch metadata", err.Error())
+ }
+
if err := p.cleanupOldLogs(); err != nil {
log.Warn("cannot clean up old logs", err.Error())
}
diff --git a/engine/internal/retrieval/engine/postgres/tools/tools.go b/engine/internal/retrieval/engine/postgres/tools/tools.go
index 1fe2cefe..b8ac9ce7 100644
--- a/engine/internal/retrieval/engine/postgres/tools/tools.go
+++ b/engine/internal/retrieval/engine/postgres/tools/tools.go
@@ -183,6 +183,7 @@ func AddVolumesToHostConfig(ctx context.Context, docker *client.Client, hostConf
// GetMountsFromMountPoints creates a list of mounts.
func GetMountsFromMountPoints(dataDir string, mountPoints []types.MountPoint) []mount.Mount {
mounts := make([]mount.Mount, 0, len(mountPoints))
+ seen := make(map[string]struct{})
for _, mountPoint := range mountPoints {
// Rewrite mounting to data directory.
@@ -192,6 +193,18 @@ func GetMountsFromMountPoints(dataDir string, mountPoints []types.MountPoint) []
mountPoint.Destination = dataDir
}
+ // Deduplicate mounts by normalizing paths and checking both source and target.
+ normalizedSource := strings.Trim(mountPoint.Source, "/")
+ normalizedTarget := strings.Trim(mountPoint.Destination, "/")
+ mountKey := normalizedSource + "|" + normalizedTarget
+
+ if _, ok := seen[mountKey]; ok {
+ log.Dbg("skipping duplicate mount", mountPoint.Source, "to", mountPoint.Destination)
+ continue
+ }
+
+ seen[mountKey] = struct{}{}
+
mounts = append(mounts, mount.Mount{
Type: mountPoint.Type,
Source: mountPoint.Source,
diff --git a/engine/internal/retrieval/engine/postgres/tools/tools_test.go b/engine/internal/retrieval/engine/postgres/tools/tools_test.go
index 9398470e..3c8aba14 100644
--- a/engine/internal/retrieval/engine/postgres/tools/tools_test.go
+++ b/engine/internal/retrieval/engine/postgres/tools/tools_test.go
@@ -37,11 +37,13 @@ func TestIfDirectoryEmpty(t *testing.T) {
func TestGetMountsFromMountPoints(t *testing.T) {
testCases := []struct {
+ name string
dataDir string
mountPoints []types.MountPoint
expectedPoints []mount.Mount
}{
{
+ name: "simple mount without transformation",
dataDir: "/var/lib/dblab/clones/dblab_clone_6000/data",
mountPoints: []types.MountPoint{{
Source: "/var/lib/pgsql/data",
@@ -56,8 +58,8 @@ func TestGetMountsFromMountPoints(t *testing.T) {
},
}},
},
-
{
+ name: "mount with path transformation",
dataDir: "/var/lib/dblab/clones/dblab_clone_6000/data",
mountPoints: []types.MountPoint{{
Source: "/var/lib/postgresql",
@@ -72,10 +74,44 @@ func TestGetMountsFromMountPoints(t *testing.T) {
},
}},
},
+ {
+ name: "deduplicate identical mounts",
+ dataDir: "/var/lib/dblab/data",
+ mountPoints: []types.MountPoint{
+ {Source: "/host/dump", Destination: "/var/lib/dblab/dump"},
+ {Source: "/host/dump", Destination: "/var/lib/dblab/dump"},
+ },
+ expectedPoints: []mount.Mount{{
+ Source: "/host/dump",
+ Target: "/var/lib/dblab/dump",
+ ReadOnly: true,
+ BindOptions: &mount.BindOptions{
+ Propagation: "",
+ },
+ }},
+ },
+ {
+ name: "deduplicate mounts with trailing slashes",
+ dataDir: "/var/lib/dblab/data",
+ mountPoints: []types.MountPoint{
+ {Source: "/host/dump/", Destination: "/var/lib/dblab/dump"},
+ {Source: "/host/dump", Destination: "/var/lib/dblab/dump/"},
+ },
+ expectedPoints: []mount.Mount{{
+ Source: "/host/dump/",
+ Target: "/var/lib/dblab/dump",
+ ReadOnly: true,
+ BindOptions: &mount.BindOptions{
+ Propagation: "",
+ },
+ }},
+ },
}
for _, tc := range testCases {
- mounts := GetMountsFromMountPoints(tc.dataDir, tc.mountPoints)
- assert.Equal(t, tc.expectedPoints, mounts)
+ t.Run(tc.name, func(t *testing.T) {
+ mounts := GetMountsFromMountPoints(tc.dataDir, tc.mountPoints)
+ assert.Equal(t, tc.expectedPoints, mounts)
+ })
}
}
diff --git a/engine/internal/srv/branch.go b/engine/internal/srv/branch.go
index 389b931c..1941591b 100644
--- a/engine/internal/srv/branch.go
+++ b/engine/internal/srv/branch.go
@@ -47,9 +47,6 @@ func (s *Server) listBranches(w http.ResponseWriter, r *http.Request) {
branchDetails := make([]models.BranchView, 0, len(branches))
- // branchRegistry is used to display the "main" branch with only the most recent snapshot.
- branchRegistry := make(map[string]int, 0)
-
for _, branchEntity := range branches {
snapshotDetails, ok := repo.Snapshots[branchEntity.SnapshotID]
if !ok {
@@ -60,6 +57,7 @@ func (s *Server) listBranches(w http.ResponseWriter, r *http.Request) {
branchView := models.BranchView{
Name: branchEntity.Name,
+ BaseDataset: branchEntity.Dataset,
Parent: parentSnapshot,
DataStateAt: snapshotDetails.DataStateAt,
SnapshotID: snapshotDetails.ID,
@@ -67,15 +65,6 @@ func (s *Server) listBranches(w http.ResponseWriter, r *http.Request) {
NumSnapshots: numSnapshots,
}
- if position, ok := branchRegistry[branchEntity.Name]; ok {
- if branchView.DataStateAt > branchDetails[position].DataStateAt {
- branchDetails[position] = branchView
- }
-
- continue
- }
-
- branchRegistry[branchView.Name] = len(branchDetails)
branchDetails = append(branchDetails, branchView)
}
@@ -136,15 +125,36 @@ func containsString(slice []string, s string) bool {
}
func (s *Server) getFSManagerForBranch(branchName string) (pool.FSManager, error) {
+ return s.getFSManagerForBranchAndDataset(branchName, "")
+}
+
+func (s *Server) getFSManagerForBranchAndDataset(branchName, dataset string) (pool.FSManager, error) {
allBranches, err := s.getAllAvailableBranches(s.pm.First())
if err != nil {
return nil, fmt.Errorf("failed to get branch list: %w", err)
}
for _, branchEntity := range allBranches {
- if branchEntity.Name == branchName { // TODO: filter by pool name as well because branch name is ambiguous.
+ if branchEntity.Name != branchName {
+ continue
+ }
+
+ if dataset == "" {
return s.getFSManagerForSnapshot(branchEntity.SnapshotID)
}
+
+ fsm, err := s.getFSManagerForSnapshot(branchEntity.SnapshotID)
+ if err != nil {
+ continue
+ }
+
+ if fsm.Pool().Name == dataset {
+ return fsm, nil
+ }
+ }
+
+ if dataset != "" {
+ return nil, fmt.Errorf("failed to find dataset %s of the branch: %s", dataset, branchName)
}
return nil, fmt.Errorf("failed to found dataset of the branch: %s", branchName)
@@ -185,6 +195,16 @@ func (s *Server) createBranch(w http.ResponseWriter, r *http.Request) {
}
}
+ snapshotID := createRequest.SnapshotID
+
+ if snapshotID != "" {
+ fsm, err = s.getFSManagerForSnapshot(snapshotID)
+ if err != nil {
+ api.SendBadRequestError(w, r, err.Error())
+ return
+ }
+ }
+
if fsm == nil {
api.SendBadRequestError(w, r, "no pool manager found")
return
@@ -201,8 +221,6 @@ func (s *Server) createBranch(w http.ResponseWriter, r *http.Request) {
return
}
- snapshotID := createRequest.SnapshotID
-
if snapshotID == "" {
if createRequest.BaseBranch == "" {
api.SendBadRequestError(w, r, "either base branch name or base snapshot ID must be specified")
@@ -434,17 +452,6 @@ func (s *Server) snapshot(w http.ResponseWriter, r *http.Request) {
return
}
- snapshot, err := s.Cloning.GetSnapshotByID(snapshotName)
- if err != nil {
- api.SendBadRequestError(w, r, err.Error())
- return
- }
-
- if err := s.Cloning.UpdateCloneSnapshot(clone.ID, snapshot); err != nil {
- api.SendBadRequestError(w, r, err.Error())
- return
- }
-
s.tm.SendEvent(context.Background(), telemetry.SnapshotCreatedEvent, telemetry.SnapshotCreated{})
if err := api.WriteJSON(w, http.StatusOK, types.SnapshotResponse{SnapshotID: snapshotName}); err != nil {
@@ -472,6 +479,18 @@ func filterSnapshotsByBranch(pool *resources.Pool, branch string, snapshots []mo
return filtered
}
+func filterSnapshotsByDataset(dataset string, snapshots []models.Snapshot) []models.Snapshot {
+ filtered := make([]models.Snapshot, 0)
+
+ for _, sn := range snapshots {
+ if sn.Pool == dataset {
+ filtered = append(filtered, sn)
+ }
+ }
+
+ return filtered
+}
+
func (s *Server) log(w http.ResponseWriter, r *http.Request) {
branchName := mux.Vars(r)["branchName"]
@@ -632,11 +651,17 @@ func snapshotsToRemove(repo *models.Repo, snapshotID, branchName string) []strin
}
func traverseUp(repo *models.Repo, snapshotID, branchName string) []string {
+ removingList := []string{}
+ visited := make(map[string]struct{})
snapshotPointer := repo.Snapshots[snapshotID]
- removingList := []string{}
+ for snapshotPointer.Parent != "-" && snapshotPointer.Parent != "" {
+ if _, found := visited[snapshotPointer.ID]; found {
+ break
+ }
+
+ visited[snapshotPointer.ID] = struct{}{}
- for snapshotPointer.Parent != "-" {
for _, snapshotRoot := range snapshotPointer.Root {
if snapshotRoot == branchName {
return removingList
@@ -644,7 +669,13 @@ func traverseUp(repo *models.Repo, snapshotID, branchName string) []string {
}
removingList = append(removingList, snapshotPointer.ID)
- snapshotPointer = repo.Snapshots[snapshotPointer.Parent]
+
+ nextSnapshot, exists := repo.Snapshots[snapshotPointer.Parent]
+ if !exists {
+ break
+ }
+
+ snapshotPointer = nextSnapshot
}
return removingList
diff --git a/engine/internal/srv/routes.go b/engine/internal/srv/routes.go
index 15f2ab56..eedbd2eb 100644
--- a/engine/internal/srv/routes.go
+++ b/engine/internal/srv/routes.go
@@ -55,7 +55,7 @@ func (s *Server) retrievalState(w http.ResponseWriter, r *http.Request) {
retrieving.NextRefresh = models.NewLocalTime(spec.Next(time.Now()))
}
- retrieving.Activity = s.jobActivity(r.Context())
+ retrieving.Activity = s.jobActivity(context.Background())
if err := api.WriteJSON(w, http.StatusOK, retrieving); err != nil {
api.SendError(w, r, err)
@@ -108,8 +108,11 @@ func (s *Server) getSnapshots(w http.ResponseWriter, r *http.Request) {
return
}
- if branchRequest := r.URL.Query().Get("branch"); branchRequest != "" {
- fsm, err := s.getFSManagerForBranch(branchRequest)
+ branchRequest := r.URL.Query().Get("branch")
+ datasetRequest := r.URL.Query().Get("dataset")
+
+ if branchRequest != "" {
+ fsm, err := s.getFSManagerForBranchAndDataset(branchRequest, datasetRequest)
if err != nil {
api.SendBadRequestError(w, r, err.Error())
return
@@ -123,6 +126,10 @@ func (s *Server) getSnapshots(w http.ResponseWriter, r *http.Request) {
snapshots = filterSnapshotsByBranch(fsm.Pool(), branchRequest, snapshots)
}
+ if branchRequest == "" && datasetRequest != "" {
+ snapshots = filterSnapshotsByDataset(datasetRequest, snapshots)
+ }
+
if err = api.WriteJSON(w, http.StatusOK, snapshots); err != nil {
api.SendError(w, r, err)
return
@@ -182,7 +189,9 @@ func (s *Server) createSnapshot(w http.ResponseWriter, r *http.Request) {
return
}
- // TODO: set branching metadata.
+ if err := fsManager.VerifyBranchMetadata(); err != nil {
+ log.Warn("cannot verify branch metadata", err.Error())
+ }
latestSnapshot := snapshotList[0]
@@ -320,6 +329,28 @@ func (s *Server) deleteSnapshot(w http.ResponseWriter, r *http.Request) {
}
}
+ // When force-deleting recursively, move the branch label to the parent snapshot
+ if force && snapshotProperties.Parent != "" {
+ parentProps, err := fsm.GetSnapshotProperties(snapshotProperties.Parent)
+ if err != nil {
+ log.Err(err.Error())
+ }
+
+ branchName := snapshotProperties.Branch
+ fullDataset, _, found := strings.Cut(snapshotID, "@")
+
+ if branchName == "" && found {
+ branchName, _ = branching.ParseBranchName(fullDataset, poolName)
+ }
+
+ if branchName != "" && !isRoot(parentProps.Root, branchName) {
+ err := fsm.AddBranchProp(branchName, snapshotProperties.Parent)
+ if err != nil {
+ log.Err(err.Error())
+ }
+ }
+ }
+
if err = fsm.DestroySnapshot(snapshotID, thinclones.DestroyOptions{Force: force}); err != nil {
api.SendBadRequestError(w, r, err.Error())
return
@@ -331,22 +362,37 @@ func (s *Server) deleteSnapshot(w http.ResponseWriter, r *http.Request) {
return
}
- if snapshotProperties.Clones == "" && snapshot.NumClones == 0 {
+ if snapshotProperties.Clones == "" && snapshot.NumClones == 0 && snapshotProperties.Child == "" {
// Destroy dataset if there are no related objects
if fullDataset, _, found := strings.Cut(snapshotID, "@"); found && fullDataset != poolName {
- if err = fsm.DestroyDataset(fullDataset); err != nil {
+ activeDatasets, err := fsm.GetActiveDatasets(fullDataset)
+ if err != nil {
api.SendBadRequestError(w, r, err.Error())
return
}
+ // No active datasets or clones
+ if len(activeDatasets) == 0 && !s.hasActiveClone(fullDataset, poolName) {
+ if err = fsm.DestroyDataset(fullDataset); err != nil {
+ api.SendBadRequestError(w, r, err.Error())
+ return
+ }
+ }
+
// Remove dle:branch and dle:root from parent snapshot
if snapshotProperties.Parent != "" {
+ parentProps, err := fsm.GetSnapshotProperties(snapshotProperties.Parent)
+ if err != nil {
+ log.Err(err.Error())
+ }
+
branchName := snapshotProperties.Branch
if branchName == "" {
branchName, _ = branching.ParseBranchName(fullDataset, poolName)
}
- if branchName != "" {
+ // Clean up user branch labels and prevent main branch deletion
+ if branchName != "" && branchName != branching.DefaultBranch && isRoot(parentProps.Root, branchName) {
if err := fsm.DeleteBranchProp(branchName, snapshotProperties.Parent); err != nil {
log.Err(err.Error())
}
@@ -357,11 +403,18 @@ func (s *Server) deleteSnapshot(w http.ResponseWriter, r *http.Request) {
}
}
- // TODO: review all available revisions. Destroy base dataset only if there no any revision.
- if baseDataset, found := strings.CutSuffix(fullDataset, "/r0"); found {
- if err = fsm.DestroyDataset(baseDataset); err != nil {
- api.SendBadRequestError(w, r, err.Error())
- return
+ // Check if the dataset name ends with a revision suffix (for example, /r0)
+ if branching.RevisionPattern.MatchString(fullDataset) {
+ // Remove the revision suffix
+ baseDataset := branching.RevisionPattern.ReplaceAllString(fullDataset, "")
+ origins := fsm.GetDatasetOrigins(baseDataset)
+
+ // If this is the last revision, remove the base dataset
+ if len(origins) < branching.MinDatasetNumber {
+ if err = fsm.DestroyDataset(baseDataset); err != nil {
+ api.SendBadRequestError(w, r, err.Error())
+ return
+ }
}
}
}
@@ -389,6 +442,30 @@ func (s *Server) deleteSnapshot(w http.ResponseWriter, r *http.Request) {
}
}
+func (s *Server) hasActiveClone(fullDataset, poolName string) bool {
+ cloneID, ok := branching.ParseCloneName(fullDataset, poolName)
+ if !ok {
+ return false
+ }
+
+ _, errClone := s.Cloning.GetClone(cloneID)
+ if errClone != nil && errClone.Error() == "clone not found" {
+ return false
+ }
+
+ return true
+}
+
+func isRoot(root, branch string) bool {
+ if root == "" || branch == "" {
+ return false
+ }
+
+ rootBranches := strings.Split(root, ",")
+
+ return containsString(rootBranches, branch)
+}
+
func (s *Server) detectPoolName(snapshotID string) (string, error) {
const snapshotParts = 2
diff --git a/engine/pkg/models/branch.go b/engine/pkg/models/branch.go
index e29f3cc7..dcdf4203 100644
--- a/engine/pkg/models/branch.go
+++ b/engine/pkg/models/branch.go
@@ -35,6 +35,7 @@ type SnapshotDetails struct {
// BranchView describes branch view.
type BranchView struct {
Name string `json:"name"`
+ BaseDataset string `json:"baseDataset"`
Parent string `json:"parent"`
DataStateAt string `json:"dataStateAt"`
SnapshotID string `json:"snapshotID"`
@@ -45,5 +46,6 @@ type BranchView struct {
// BranchEntity defines a branch-snapshot pair.
type BranchEntity struct {
Name string
+ Dataset string
SnapshotID string
}
diff --git a/engine/pkg/models/clone.go b/engine/pkg/models/clone.go
index b7300175..da6e4d1c 100644
--- a/engine/pkg/models/clone.go
+++ b/engine/pkg/models/clone.go
@@ -6,17 +6,16 @@ package models
// Clone defines a clone model.
type Clone struct {
- ID string `json:"id"`
- Snapshot *Snapshot `json:"snapshot"`
- Branch string `json:"branch"`
- Revision int `json:"revision"`
- HasDependent bool `json:"hasDependent"`
- Protected bool `json:"protected"`
- DeleteAt *LocalTime `json:"deleteAt"`
- CreatedAt *LocalTime `json:"createdAt"`
- Status Status `json:"status"`
- DB Database `json:"db"`
- Metadata CloneMetadata `json:"metadata"`
+ ID string `json:"id"`
+ Snapshot *Snapshot `json:"snapshot"`
+ Branch string `json:"branch"`
+ Revision int `json:"revision"`
+ Protected bool `json:"protected"`
+ DeleteAt *LocalTime `json:"deleteAt"`
+ CreatedAt *LocalTime `json:"createdAt"`
+ Status Status `json:"status"`
+ DB Database `json:"db"`
+ Metadata CloneMetadata `json:"metadata"`
}
// CloneMetadata contains fields describing a clone model.
diff --git a/engine/pkg/util/branching/branching.go b/engine/pkg/util/branching/branching.go
index 75053856..3439dbdb 100644
--- a/engine/pkg/util/branching/branching.go
+++ b/engine/pkg/util/branching/branching.go
@@ -8,6 +8,7 @@ package branching
import (
"fmt"
"path"
+ "regexp"
"strings"
)
@@ -20,8 +21,16 @@ const (
// BranchDir defines branch directory in the pool.
BranchDir = "branch"
+
+ // MinDatasetNumber is 2 because:
+ // - test_dblab_pool/branch/001-branch/clone001 - root
+ // - test_dblab_pool/branch/001-branch/clone001/r0 - revision
+ MinDatasetNumber = 2
)
// RevisionPattern matches a dataset revision suffix (for example, "/r0").
+var RevisionPattern = regexp.MustCompile(`/r\d+$`)
+
// BranchName returns a full branch name in the data pool.
func BranchName(poolName, branchName string) string {
return path.Join(poolName, BranchDir, branchName)
@@ -108,3 +117,15 @@ func ParseBranchNameFromSnapshot(snapshot, poolName string) string {
return branch
}
+
+// ParseBaseDatasetFromSnapshot parses the base dataset name from the snapshot ID.
+func ParseBaseDatasetFromSnapshot(snapshot string) string {
+ fullDataset, _, found := strings.Cut(snapshot, "@")
+ if !found {
+ return ""
+ }
+
+ dataset, _, _ := strings.Cut(fullDataset, "/"+BranchDir+"/")
+
+ return dataset
+}
diff --git a/engine/pkg/util/networks/networks.go b/engine/pkg/util/networks/networks.go
index 311db071..c7aebcb1 100644
--- a/engine/pkg/util/networks/networks.go
+++ b/engine/pkg/util/networks/networks.go
@@ -55,9 +55,8 @@ func Setup(ctx context.Context, dockerCLI *client.Client, instanceID, containerN
"app": DLEApp,
"type": InternalType,
},
- Attachable: true,
- Internal: true,
- CheckDuplicate: true,
+ Attachable: true,
+ Internal: true,
})
if err != nil {
return "", err
diff --git a/engine/scripts/cli_install.sh b/engine/scripts/cli_install.sh
index ee46abfb..8dbda161 100644
--- a/engine/scripts/cli_install.sh
+++ b/engine/scripts/cli_install.sh
@@ -31,7 +31,7 @@ esac
arch=$(uname -m)
case "$arch" in
x86_64*) arch="amd64" ;;
- arm64*) arch="arm64" ;;
+ arm64*|aarch64*) arch="arm64" ;;
*) echo "Unsupported architecture: $arch"; exit 1 ;;
esac
diff --git a/engine/test/2.logical_generic.sh b/engine/test/2.logical_generic.sh
index 93fdb268..fb21a94b 100644
--- a/engine/test/2.logical_generic.sh
+++ b/engine/test/2.logical_generic.sh
@@ -19,6 +19,10 @@ export DLE_SERVER_PORT=${DLE_SERVER_PORT:-12345}
export DLE_PORT_POOL_FROM=${DLE_PORT_POOL_FROM:-9000}
export DLE_PORT_POOL_TO=${DLE_PORT_POOL_TO:-9099}
+if [ "${POSTGRES_VERSION}" = "18" ]; then
+ EXTENDED_IMAGE_TAG=""
+fi
+
DIR=${0%/*}
if [[ "${SOURCE_HOST}" = "172.17.0.1" ]]; then
@@ -103,7 +107,7 @@ yq eval -i '
.provision.portPool.to = env(DLE_PORT_POOL_TO) |
.retrieval.spec.logicalDump.options.dumpLocation = env(DLE_TEST_MOUNT_DIR) + "/" + env(DLE_TEST_POOL_NAME) + "/dump" |
.retrieval.spec.logicalRestore.options.dumpLocation = env(DLE_TEST_MOUNT_DIR) + "/" + env(DLE_TEST_POOL_NAME) + "/dump" |
- .databaseContainer.dockerImage = "registry.gitlab.com/postgres-ai/custom-images/extended-postgres:" + strenv(POSTGRES_VERSION) + env(EXTENDED_IMAGE_TAG)
+ .databaseContainer.dockerImage = "registry.gitlab.com/postgres-ai/custom-images/extended-postgres:" + strenv(POSTGRES_VERSION) + strenv(EXTENDED_IMAGE_TAG)
' "${configDir}/server.yml"
SHARED_PRELOAD_LIBRARIES="pg_stat_statements, auto_explain, pgaudit, logerrors, pg_stat_kcache"
diff --git a/engine/test/4.physical_basebackup.sh b/engine/test/4.physical_basebackup.sh
index eb562197..d7d1e001 100644
--- a/engine/test/4.physical_basebackup.sh
+++ b/engine/test/4.physical_basebackup.sh
@@ -18,6 +18,10 @@ export DLE_SERVER_PORT=${DLE_SERVER_PORT:-12345}
export DLE_PORT_POOL_FROM=${DLE_PORT_POOL_FROM:-9000}
export DLE_PORT_POOL_TO=${DLE_PORT_POOL_TO:-9099}
+if [ "${POSTGRES_VERSION}" = "18" ]; then
+ EXTENDED_IMAGE_TAG=""
+fi
+
DIR=${0%/*}
if [[ "${SOURCE_HOST}" = "172.17.0.1" ]]; then
@@ -115,7 +119,7 @@ yq eval -i '
.poolManager.mountDir = env(DLE_TEST_MOUNT_DIR) |
.provision.portPool.from = env(DLE_PORT_POOL_FROM) |
.provision.portPool.to = env(DLE_PORT_POOL_TO) |
- .databaseContainer.dockerImage = "registry.gitlab.com/postgres-ai/custom-images/extended-postgres:" + strenv(POSTGRES_VERSION) + env(EXTENDED_IMAGE_TAG) |
+ .databaseContainer.dockerImage = "registry.gitlab.com/postgres-ai/custom-images/extended-postgres:" + strenv(POSTGRES_VERSION) + strenv(EXTENDED_IMAGE_TAG) |
.retrieval.spec.physicalRestore.options.envs.PGUSER = strenv(SOURCE_USERNAME) |
.retrieval.spec.physicalRestore.options.envs.PGPASSWORD = strenv(SOURCE_PASSWORD) |
.retrieval.spec.physicalRestore.options.envs.PGHOST = strenv(SOURCE_HOST) |
diff --git a/engine/test/_cleanup.sh b/engine/test/_cleanup.sh
index 6e9ccca6..de5755a0 100644
--- a/engine/test/_cleanup.sh
+++ b/engine/test/_cleanup.sh
@@ -3,7 +3,7 @@ set -euxo pipefail
DLE_TEST_MOUNT_DIR="/var/lib/test/dblab_mount"
DLE_TEST_POOL_NAME="test_dblab_pool"
-TMP_DATA_DIR="/tmp/dle_test/logical_generic"
+TMP_DATA_DIR="/tmp/dle_test"
ZFS_FILE="$(pwd)/zfs_file"
# Stop and remove test Docker containers
diff --git a/ui/package.json b/ui/package.json
index 63a3af14..29908896 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -48,7 +48,29 @@
"elliptic@>=4.0.0 <=6.5.6": ">=6.5.7",
"elliptic@>=2.0.0 <=6.5.6": ">=6.5.7",
"elliptic@>=5.2.1 <=6.5.6": ">=6.5.7",
- "dompurify@<2.5.4": ">=2.5.4"
+ "dompurify@<2.5.4": ">=2.5.4",
+ "http-proxy-middleware@<2.0.7": ">=2.0.7",
+ "nanoid@<3.3.8": "3.3.8",
+ "elliptic@<=6.6.0": ">=6.6.1",
+ "cookie@<0.7.0": ">=0.7.0",
+ "@babel/runtime-corejs3@<7.26.10": ">=7.26.10",
+ "@babel/runtime@<7.26.10": ">=7.26.10",
+ "@babel/helpers@<7.26.10": ">=7.26.10",
+ "http-proxy-middleware@>=1.3.0 <2.0.9": ">=2.0.9",
+ "http-proxy-middleware@>=1.3.0 <2.0.8": ">=2.0.8",
+ "cross-spawn@>=7.0.0 <7.0.5": ">=7.0.5",
+ "path-to-regexp@<0.1.12": ">=0.1.12",
+ "brace-expansion@>=1.0.0 <=1.1.11": "1.1.12",
+ "brace-expansion@>=2.0.0 <=2.0.1": "2.0.2",
+ "brace-expansion@>2.0.2": "2.0.2",
+ "pbkdf2@<=3.1.2": ">=3.1.3",
+ "pbkdf2@>=3.0.10 <=3.1.2": ">=3.1.3",
+ "elliptic@<6.6.0": ">=6.6.0",
+ "prismjs@<1.30.0": ">=1.30.0",
+ "form-data@>=3.0.0 <3.0.4": ">=3.0.4",
+ "form-data@<2.5.4": ">=2.5.4",
+ "on-headers@<1.1.0": ">=1.1.0",
+ "tmp@<=0.2.3": ">=0.2.4"
}
}
}
diff --git a/ui/packages/ce/package.json b/ui/packages/ce/package.json
index 55e54843..00d73fec 100644
--- a/ui/packages/ce/package.json
+++ b/ui/packages/ce/package.json
@@ -1,6 +1,6 @@
{
"name": "@postgres.ai/ce",
- "version": "4.0.0",
+ "version": "4.0.2",
"private": true,
"dependencies": {
"@craco/craco": "^6.4.3",
@@ -26,7 +26,7 @@
"copy-to-clipboard": "^3.3.1",
"create-file-webpack": "^1.0.2",
"crypto-browserify": "^3.12.0",
- "cypress": "^12.15.0",
+ "cypress": "^14.5.4",
"date-fns": "^2.22.1",
"eslint-plugin-cypress": "^2.13.3",
"formik": "^2.2.9",
diff --git a/ui/packages/ce/src/api/snapshots/getSnapshots.ts b/ui/packages/ce/src/api/snapshots/getSnapshots.ts
index b26788eb..16af68a8 100644
--- a/ui/packages/ce/src/api/snapshots/getSnapshots.ts
+++ b/ui/packages/ce/src/api/snapshots/getSnapshots.ts
@@ -13,7 +13,14 @@ import {
import { request } from 'helpers/request'
export const getSnapshots: GetSnapshots = async (req) => {
- const url = `/snapshots${req.branchName ? `?branch=${req.branchName}` : ''}`;
+ const params = new URLSearchParams()
+ if (req.branchName) {
+ params.append('branch', req.branchName)
+ }
+ if (req.dataset) {
+ params.append('dataset', req.dataset)
+ }
+ const url = `/snapshots${params.toString() ? `?${params.toString()}` : ''}`;
const response = await request(url);
return {
diff --git a/ui/packages/shared/.gitlab-ci.yml b/ui/packages/shared/.gitlab-ci.yml
index 15baf4c9..440fa3a5 100644
--- a/ui/packages/shared/.gitlab-ci.yml
+++ b/ui/packages/shared/.gitlab-ci.yml
@@ -69,7 +69,7 @@ publish-shared-release:
- export VERSION=${CI_COMMIT_TAG#"v"}
# Build and pack
- - npm version "$VERSION" --no-git-tag-version
+ - npm version "$VERSION" --no-git-tag-version --allow-same-version
- pnpm run pack
# Publish
diff --git a/ui/packages/shared/components/ResetCloneModal/index.tsx b/ui/packages/shared/components/ResetCloneModal/index.tsx
index 4b278137..744f5ace 100644
--- a/ui/packages/shared/components/ResetCloneModal/index.tsx
+++ b/ui/packages/shared/components/ResetCloneModal/index.tsx
@@ -7,8 +7,6 @@
import { useEffect, useState } from 'react'
import { makeStyles } from '@material-ui/core'
-import { formatDistanceToNowStrict } from 'date-fns'
-
import { Clone } from '@postgres.ai/shared/types/api/entities/clone'
import { Snapshot } from '@postgres.ai/shared/types/api/entities/snapshot'
import { Text } from '@postgres.ai/shared/components/Text'
@@ -18,7 +16,7 @@ import { ImportantText } from '@postgres.ai/shared/components/ImportantText'
import { Spinner } from '@postgres.ai/shared/components/Spinner'
import { SimpleModalControls } from '@postgres.ai/shared/components/SimpleModalControls'
import { compareSnapshotsDesc } from '@postgres.ai/shared/utils/snapshot'
-import { isValidDate } from '@postgres.ai/shared/utils/date'
+import { formatDateWithDistance } from '@postgres.ai/shared/utils/date'
type Props = {
isOpen: boolean
@@ -111,12 +109,7 @@ export const ResetCloneModal = (props: Props) => {
value: snapshot.id,
children: (
<>
- {snapshot.dataStateAt} (
- {isValidDate(snapshot.dataStateAtDate) &&
- formatDistanceToNowStrict(snapshot.dataStateAtDate, {
- addSuffix: true,
- })}
- )
+ {formatDateWithDistance(snapshot.dataStateAt, snapshot.dataStateAtDate)}
{isLatest && (
Latest
)}
diff --git a/ui/packages/shared/package.json b/ui/packages/shared/package.json
index a5fd0980..7efcdbb2 100644
--- a/ui/packages/shared/package.json
+++ b/ui/packages/shared/package.json
@@ -1,6 +1,6 @@
{
"name": "@postgres.ai/shared",
- "version": "4.0.0",
+ "version": "4.0.2",
"scripts": {
"build": "tsc -p tsconfig.build.json && node scripts/copy-assets.js",
"pack": "node scripts/pack.js"
diff --git a/ui/packages/shared/pages/Branches/components/BranchesTable/index.tsx b/ui/packages/shared/pages/Branches/components/BranchesTable/index.tsx
index a8406df2..c67c88db 100644
--- a/ui/packages/shared/pages/Branches/components/BranchesTable/index.tsx
+++ b/ui/packages/shared/pages/Branches/components/BranchesTable/index.tsx
@@ -10,9 +10,7 @@ import { useEffect, useState } from 'react'
import copy from 'copy-to-clipboard'
import { makeStyles } from '@material-ui/core'
import { useHistory } from 'react-router-dom'
-import { formatDistanceToNowStrict } from 'date-fns'
-
-import { isValidDate } from '@postgres.ai/shared/utils/date'
+import { formatDateWithDistance } from '@postgres.ai/shared/utils/date'
import { ArrowDropDownIcon } from '@postgres.ai/shared/icons/ArrowDropDown'
import { Branch } from '@postgres.ai/shared/types/api/endpoints/getBranches'
import { HorizontalScrollContainer } from '@postgres.ai/shared/components/HorizontalScrollContainer'
@@ -168,13 +166,7 @@ export const BranchesTable = ({
{branch.name}{branch.parent}
- {branch.dataStateAt} (
- {isValidDate(new Date(branch.dataStateAt))
- ? formatDistanceToNowStrict(new Date(branch.dataStateAt), {
- addSuffix: true,
- })
- : '-'}
- )
+ {formatDateWithDistance(branch.dataStateAt, new Date(branch.dataStateAt))}
{branch.snapshotID}{branch.numSnapshots}
diff --git a/ui/packages/shared/pages/CreateBranch/index.tsx b/ui/packages/shared/pages/CreateBranch/index.tsx
index 0ae11354..d828d1d9 100644
--- a/ui/packages/shared/pages/CreateBranch/index.tsx
+++ b/ui/packages/shared/pages/CreateBranch/index.tsx
@@ -107,6 +107,7 @@ export const CreateBranchPage = observer(
const classes = useStyles()
const history = useHistory()
const [branchSnapshots, setBranchSnapshots] = useState([])
+ const [selectedBranchKey, setSelectedBranchKey] = useState('main|')
const {
load,
@@ -131,8 +132,8 @@ export const CreateBranchPage = observer(
})
}
- const fetchSnapshots = async (branchName: string) => {
- await getSnapshots(instanceId, branchName).then((response) => {
+ const fetchSnapshots = async (branchName: string, dataset?: string) => {
+ await getSnapshots(instanceId, branchName, dataset).then((response) => {
if (response) {
setBranchSnapshots(response)
formik.setFieldValue('snapshotID', response[0]?.id)
@@ -143,17 +144,28 @@ export const CreateBranchPage = observer(
const handleParentBranchChange = async (
e: React.ChangeEvent,
) => {
- const branchName = e.target.value
+ const compositeKey = e.target.value
+ const [branchName, dataset] = compositeKey.split('|')
+
+ setSelectedBranchKey(compositeKey)
formik.setFieldValue('baseBranch', branchName)
- await fetchSnapshots(branchName)
+ await fetchSnapshots(branchName, dataset)
}
const [{ formik }] = useForm(handleSubmit)
- useEffect(() => {
- load(instanceId)
- fetchSnapshots(formik.values.baseBranch)
- }, [formik.values.baseBranch])
+ useEffect(() => {
+ load(instanceId);
+ }, [instanceId]);
+
+ useEffect(() => {
+ if (!branchesList?.length) return;
+ const selected = branchesList.find(b => b.name === formik.values.baseBranch);
+ if (!selected) return;
+ const compositeKey = `${selected.name}|${selected.baseDataset || ''}`;
+ setSelectedBranchKey(compositeKey);
+ fetchSnapshots(selected.name, selected.baseDataset);
+ }, [branchesList]);
if (isBranchesLoading) {
return
@@ -207,16 +219,20 @@ export const CreateBranchPage = observer(