diff --git a/README.md b/README.md index 9eada025..8ae9f89a 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@
@@ -92,7 +92,7 @@ Read more: - Theoretical max of snapshots/clones: 2<sup>64</sup> ([ZFS](https://en.wikipedia.org/wiki/ZFS), default) - Maximum size of PostgreSQL data directory: 256 quadrillion zebibytes, or 2<sup>128</sup> bytes ([ZFS](https://en.wikipedia.org/wiki/ZFS), default) - Support & technologies - - Supported PostgreSQL versions: 9.6–17 + - Supported PostgreSQL versions: 9.6–18 - Thin cloning ([CoW](https://en.wikipedia.org/wiki/Copy-on-write)) technologies: [ZFS](https://en.wikipedia.org/wiki/ZFS) and [LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)) - UI for manual tasks and API & CLI for automation - Packaged in Docker containers for all components diff --git a/engine/.gitlab-ci.yml b/engine/.gitlab-ci.yml index 7499b869..22c505d9 100644 --- a/engine/.gitlab-ci.yml +++ b/engine/.gitlab-ci.yml @@ -1,6 +1,6 @@ default: image: - name: golang:1.23 + name: golang:1.24 pull_policy: if-not-present stages: @@ -58,7 +58,7 @@ lint: build-binary-alpine: <<: *only_engine image: - name: golang:1.23-alpine + name: golang:1.24-alpine pull_policy: if-not-present stage: build-binary artifacts: @@ -87,8 +87,8 @@ build-binary-client-master: - make build-client # Install google-cloud-sdk. + - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg - echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - - apt-get update && apt-get install -y google-cloud-sdk # Authenticate. @@ -107,8 +107,8 @@ build-binary-client: - make build-client # Install google-cloud-sdk. + - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg - echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - - apt-get update && apt-get install -y google-cloud-sdk # Authenticate. @@ -128,8 +128,8 @@ build-binary-client-rc: - make build-client # Install google-cloud-sdk. + - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg - echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - - apt-get update && apt-get install -y google-cloud-sdk # Authenticate.
@@ -477,6 +477,11 @@ bash-test-17: variables: POSTGRES_VERSION: 17 +bash-test-18: + <<: *bash_test + variables: + POSTGRES_VERSION: 18 + integration-test: services: - name: docker:24-dind diff --git a/engine/Dockerfile.dblab-server-debug b/engine/Dockerfile.dblab-server-debug index af6b1f17..dd8b13c7 100644 --- a/engine/Dockerfile.dblab-server-debug +++ b/engine/Dockerfile.dblab-server-debug @@ -1,7 +1,7 @@ # How to start a container: https://postgres.ai/docs/how-to-guides/administration/engine-manage # Compile stage -FROM golang:1.23 AS build-env +FROM golang:1.24 AS build-env # Build Delve RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/engine/Makefile b/engine/Makefile index 84bf96de..ffb2edf0 100644 --- a/engine/Makefile +++ b/engine/Makefile @@ -34,7 +34,7 @@ help: ## Display the help message all: clean build ## Build all binary components of the project install-lint: ## Install the linter to $GOPATH/bin which is expected to be in $PATH - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.61.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.64.8 run-lint: ## Run linters golangci-lint run diff --git a/engine/configs/standard/postgres/default/18/pg_hba.conf b/engine/configs/standard/postgres/default/18/pg_hba.conf new file mode 100644 index 00000000..cccbfad1 --- /dev/null +++ b/engine/configs/standard/postgres/default/18/pg_hba.conf @@ -0,0 +1,128 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# ---------------------- +# Authentication Records +# ---------------------- +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: +# - "local" is a Unix-domain socket +# - "host" is a TCP/IP socket (encrypted or not) +# - "hostssl" is a TCP/IP socket that is SSL-encrypted +# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted +# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted +# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, a regular expression (if it starts with a slash (/)) +# or a comma-separated list thereof. The "all" keyword does not match +# "replication". Access to replication must be enabled in a separate +# record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", a +# regular expression (if it starts with a slash (/)) or a comma-separated +# list thereof. In both the DATABASE and USER fields you can also write +# a file name prefixed with "@" to include names from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. 
It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "scram-sha-256", "password", "gss", +# "sspi", "ident", "peer", "pam", "ldap", "radius", "cert" or "oauth". +# Note that "password" sends passwords in clear text; "scram-sha-256" is +# preferred since it sends encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# --------------- +# Include Records +# --------------- +# +# This file allows the inclusion of external files or directories holding +# more records, using the following keywords: +# +# include FILE +# include_if_exists FILE +# include_dir DIRECTORY +# +# FILE is the file name to include, and DIR is the directory name containing +# the file(s) to include. Any file in a directory will be loaded if suffixed +# with ".conf". The files of a directory are ordered by name. +# include_if_exists ignores missing files. FILE and DIRECTORY can be +# specified as a relative or an absolute path, and can be double-quoted if +# they contain spaces. +# +# ------------- +# Miscellaneous +# ------------- +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# ---------------------------------- +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# CAUTION: Configuring the system for local "trust" authentication +# allows any local user to connect as any PostgreSQL user, including +# the database superuser. If you do not trust all your local users, +# use another authentication method. + + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all trust +# IPv4 local connections: +host all all 127.0.0.1/32 trust +# IPv6 local connections: +host all all ::1/128 trust +# Allow replication connections from localhost, by a user with the +# replication privilege. 
+local replication all trust +host replication all 127.0.0.1/32 trust +host replication all ::1/128 trust + +host all all all scram-sha-256 diff --git a/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf b/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf new file mode 100644 index 00000000..425de11c --- /dev/null +++ b/engine/configs/standard/postgres/default/18/postgresql.dblab.postgresql.conf @@ -0,0 +1,884 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=all". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#reserved_connections = 0 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = scram-sha-256 # scram-sha-256 or md5 +#scram_iterations = 4096 +#md5_password_warnings = on +#oauth_validator_libraries = '' # comma-separated list of trusted validator modules + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off +#gss_accept_delegation = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed TLSv1.2 ciphers +#ssl_tls13_ciphers = '' # allowed TLSv1.3 cipher suites, blank for default +#ssl_prefer_server_ciphers = on +#ssl_groups = 'X25519:prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem +#maintenance_work_mem = 64MB # min 64kB +#autovacuum_work_mem = -1 # min 64kB, or -1 to use maintenance_work_mem +#logical_decoding_work_mem = 64MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is usually the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +#min_dynamic_shared_memory = 0MB # (change requires restart) +#vacuum_buffer_usage_limit = 2MB # size of vacuum and analyze buffer access strategy ring; + # 0 to disable vacuum buffer access strategy; + # range 128kB to 16GB + +# SLRU buffers (change requires restart) +#commit_timestamp_buffers = 0 # memory for pg_commit_ts (0 = auto) +#multixact_offset_buffers = 16 # memory for pg_multixact/offsets +#multixact_member_buffers = 32 # memory for pg_multixact/members +#notify_buffers = 16 # memory for pg_notify +#serializable_buffers = 32 # memory for pg_serial +#subtransaction_buffers = 0 # memory for pg_subtrans (0 = auto) +#transaction_buffers = 0 # memory for pg_xact (0 = auto) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +#file_copy_method = copy # copy, clone (if supported by OS) + +#max_notify_queue_pages = 1048576 # limits the number of SLRU pages allocated + # for NOTIFY / LISTEN queue + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 64 + # (change requires restart) + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - I/O - + +#backend_flush_after = 0 # measured in pages, 0 disables +#effective_io_concurrency = 16 # 1-1000; 0 disables issuing multiple simultaneous IO requests +#maintenance_io_concurrency = 16 # 1-1000; same as effective_io_concurrency +#io_max_combine_limit = 128kB # usually 1-128 blocks (depends on OS) + # (change requires restart) +#io_combine_limit = 128kB # usually 1-128 blocks (depends on OS) + +#io_method = worker # worker, io_uring, sync + # (change requires restart) +#io_max_concurrency = -1 # Max number of IOs that one process + # can execute simultaneously + # -1 sets based on shared_buffers + # (change requires restart) +#io_workers = 3 # 1-32; + +# - Worker Processes - + +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # limited by max_parallel_workers +#max_parallel_maintenance_workers = 2 # limited by max_parallel_workers +#max_parallel_workers = 8 # number of max_worker_processes that + # can be used in parallel operations +#parallel_leader_participation = on + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, 
or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_compression = off # enables compression of full-page writes; + # off, pglz, lz4, zstd, or on +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_timeout = 5min # range 30s-1d +#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 1GB +min_wal_size = 80MB + +# - Prefetching during recovery - + +#recovery_prefetch = try # prefetch pages referenced in the WAL? +#wal_decode_buffer_size = 512kB # lookahead window used for prefetching + # (change requires restart) + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_library = '' # library to use to archive a WAL file + # (empty string indicates archive_command should + # be used) +#archive_command = '' # command to use to archive a WAL file + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a WAL file switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +#restore_command = '' # command to use to restore an archived WAL file + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. + +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + +# - WAL Summarization - + +#summarize_wal = off # run WAL summarizer process? 
+#wal_summary_keep_time = '10d' # when to remove old summary files, 0 = never + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#wal_keep_size = 0 # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#idle_replication_slot_timeout = 0 # in seconds; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#synchronized_standby_slots = '' # streaming replication standby server slot + # names that logical walsender processes will wait for + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery +#sync_replication_slots = off # enables slot synchronization on the physical standby from the primary + +# - Subscribers - + +# These settings are ignored on a publisher. 
+ +#max_active_replication_origins = 10 # max number of active replication origins + # (change requires restart) +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers +#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_presorted_aggregate = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on +#enable_group_by_reordering = on +#enable_distinct_reordering = on +#enable_self_join_elimination = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#jit = on # allow JIT compilation +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan +#recursive_worktable_factor = 10.0 # range 0.001-1000000 + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, jsonlog, syslog, and + # eventlog, depending on platform. + # csvlog and jsonlog require + # logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr, jsonlog, + # and csvlog into log files. Required + # to be on for csvlogs and jsonlogs. 
+ # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +#log_startup_progress_interval = 10s # Time between progress updates for + # long-running startup operations. + # 0 disables the feature, > 0 indicates + # the interval in milliseconds. + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = 10min # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#log_checkpoints = on +#log_connections = '' # log aspects of connection setup + # options include receipt, authentication, authorization, + # setup_durations, and all to log all of these aspects +#log_disconnections = off +#log_duration = off # log statement duration +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %L = local address + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_lock_failures = off # log lock failures +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Etc/UTC' + +# - Process Title - + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Cumulative Query and Index Statistics - + +#track_activities = on +#track_activity_query_size = 1024 # (change requires restart) +#track_counts = on +#track_cost_delay_timing = off +#track_io_timing = off +#track_wal_io_timing = off +#track_functions = none # none, pl, all +#stats_fetch_consistency = cache # cache, none, snapshot + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# VACUUMING +#------------------------------------------------------------------------------ + +# - Automatic Vacuuming - + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+autovacuum_worker_slots = 16 # autovacuum worker slots to allocate + # (change requires restart) +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of unfrozen pages + # before insert vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_vacuum_max_threshold = 100000000 # max number of row updates + # before vacuum; -1 disables max + # threshold +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Default Behavior - + +#vacuum_truncate = on # enable truncation after vacuum + +# - Freezing - + +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#vacuum_max_eager_freeze_failure_rate = 0.03 # 0 disables eager scanning + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +#default_toast_compression = 'pglz' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#transaction_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB +#createrole_self_grant = '' # set and/or inherit +#event_triggers = on + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle 
= 'postgres' +timezone = 'Etc/UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'en_US.utf8' # locale for system error message + # strings +lc_monetary = 'en_US.utf8' # locale for monetary formatting +lc_numeric = 'en_US.utf8' # locale for number formatting +lc_time = 'en_US.utf8' # locale for time formatting + +#icu_validation_level = warning # report ICU locale validation + # errors at the given level + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +#shared_preload_libraries = '' # (change requires restart) +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#extension_control_path = '$system' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off +#allow_alter_system = on + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/engine/go.mod b/engine/go.mod index e851a499..d9b4c811 100644 --- a/engine/go.mod +++ b/engine/go.mod @@ -1,6 +1,6 @@ module gitlab.com/postgres-ai/database-lab/v3 -go 1.23.12 +go 1.24.7 require ( github.com/AlekSi/pointer v1.2.0 diff --git a/engine/go.sum b/engine/go.sum index 8399d633..2a847cb5 100644 --- a/engine/go.sum +++ b/engine/go.sum @@ -310,8 +310,6 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= -github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= diff --git a/engine/internal/cloning/base.go b/engine/internal/cloning/base.go index be3e2b20..952a9436 100644 --- a/engine/internal/cloning/base.go +++ b/engine/internal/cloning/base.go @@ -435,8 +435,10 @@ func (c *Base) refreshCloneMetadata(w *CloneWrapper) { return } + c.cloneMutex.Lock() w.Clone.Metadata.CloneDiffSize = sessionState.CloneDiffSize w.Clone.Metadata.LogicalSize = sessionState.LogicalReferenced + c.cloneMutex.Unlock() } // UpdateClone updates clone. 
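The hunk above serializes clone metadata writes under cloneMutex; the GetClones change below then removes the per-clone state lookup entirely: requests are grouped by pool while the read lock is held, and a single GetBatchSessionState call per pool runs after the lock is released. A minimal sketch of that collect-then-batch pattern, using stand-in types rather than the engine's real CloneWrapper and provisioner:

```go
package main

import (
	"fmt"
	"sync"
)

// Stand-ins for the engine's types; the names are illustrative only.
type sessionStateRequest struct {
	cloneID string
	branch  string
}

type cloneRegistry struct {
	mu    sync.RWMutex
	pools map[string]string // cloneID -> pool name
}

// collectRequests snapshots the registry under the read lock and groups
// session-state requests by pool, so the expensive per-pool ZFS calls can
// run after the lock is released: one batch per pool instead of one call
// per clone while the registry is locked.
func (r *cloneRegistry) collectRequests() map[string][]sessionStateRequest {
	r.mu.RLock()
	defer r.mu.RUnlock()

	byPool := make(map[string][]sessionStateRequest)
	for cloneID, pool := range r.pools {
		byPool[pool] = append(byPool[pool], sessionStateRequest{cloneID: cloneID})
	}

	return byPool
}

func main() {
	reg := &cloneRegistry{pools: map[string]string{
		"clone-a": "dblab_pool",
		"clone-b": "dblab_pool",
	}}

	// Outside the lock: resolve each pool's batch (GetBatchSessionState in
	// the real code) and merge the results back into the clone models.
	for pool, reqs := range reg.collectRequests() {
		fmt.Printf("pool %s: %d requests in one batch\n", pool, len(reqs))
	}
}
```

Keeping the ZFS calls outside the lock means a slow pool cannot stall every API request that needs to read the clone registry.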
@@ -527,11 +529,6 @@ func (c *Base) ResetClone(cloneID string, resetOptions types.ResetCloneRequest) log.Warn("clone has dependent snapshots", cloneID) c.cloneMutex.Lock() w.Clone.Revision++ - w.Clone.HasDependent = true - c.cloneMutex.Unlock() - } else { - c.cloneMutex.Lock() - w.Clone.HasDependent = false c.cloneMutex.Unlock() } @@ -630,6 +627,8 @@ func (c *Base) GetClones() []*models.Clone { clones := make([]*models.Clone, 0, c.lenClones()) c.cloneMutex.RLock() + requestsByPool := make(map[string][]resources.SessionStateRequest) + for _, cloneWrapper := range c.clones { if cloneWrapper.Clone.Snapshot != nil { snapshot, err := c.getSnapshotByID(cloneWrapper.Clone.Snapshot.ID) @@ -642,12 +641,30 @@ func (c *Base) GetClones() []*models.Clone { } } - c.refreshCloneMetadata(cloneWrapper) + if cloneWrapper.Session != nil && cloneWrapper.Clone != nil { + pool := cloneWrapper.Session.Pool + requestsByPool[pool] = append(requestsByPool[pool], resources.SessionStateRequest{ + CloneID: cloneWrapper.Clone.ID, + Branch: cloneWrapper.Clone.Branch, + }) + } clones = append(clones, cloneWrapper.Clone) } c.cloneMutex.RUnlock() + sessionStates, err := c.provision.GetBatchSessionState(requestsByPool) + if err != nil { + log.Err("failed to get batch session states: ", err) + } + + for _, clone := range clones { + if state, ok := sessionStates[clone.ID]; ok { + clone.Metadata.CloneDiffSize = state.CloneDiffSize + clone.Metadata.LogicalSize = state.LogicalReferenced + } + } + sort.Slice(clones, func(i, j int) bool { return clones[i].CreatedAt.After(clones[j].CreatedAt.Time) }) diff --git a/engine/internal/provision/mode_local.go b/engine/internal/provision/mode_local.go index 1d9e4958..e457e859 100644 --- a/engine/internal/provision/mode_local.go +++ b/engine/internal/provision/mode_local.go @@ -274,10 +274,8 @@ func (p *Provisioner) ResetSession(session *resources.Session, clone *models.Clo return nil, errors.Wrap(err, "failed to stop container") } - if clone.Revision == branching.DefaultRevision || !clone.HasDependent { - if err = fsm.DestroyClone(clone.Branch, name, clone.Revision); err != nil { - return nil, errors.Wrap(err, "failed to destroy clone") - } + if err = fsm.DestroyClone(clone.Branch, name, clone.Revision); err != nil { + return nil, errors.Wrap(err, "failed to destroy clone") } if err = newFSManager.CreateClone(clone.Branch, name, snapshot.ID, clone.Revision); err != nil { @@ -300,9 +298,14 @@ func (p *Provisioner) ResetSession(session *resources.Session, clone *models.Clo } snapshotModel := &models.Snapshot{ - ID: snapshot.ID, - CreatedAt: models.NewLocalTime(snapshot.CreatedAt), - DataStateAt: models.NewLocalTime(snapshot.DataStateAt), + ID: snapshot.ID, + CreatedAt: models.NewLocalTime(snapshot.CreatedAt), + DataStateAt: models.NewLocalTime(snapshot.DataStateAt), + PhysicalSize: snapshot.Used, + LogicalSize: snapshot.LogicalReferenced, + Pool: snapshot.Pool, + Branch: snapshot.Branch, + Message: snapshot.Message, } return snapshotModel, nil @@ -335,6 +338,31 @@ func (p *Provisioner) GetSessionState(s *resources.Session, branch, cloneID stri return fsm.GetSessionState(branch, cloneID) } +// GetBatchSessionState retrieves session states for multiple clones efficiently. 
+func (p *Provisioner) GetBatchSessionState(batch map[string][]resources.SessionStateRequest) (map[string]resources.SessionState, error) { + batchResults := make(map[string]resources.SessionState) + + for poolName, reqs := range batch { + fsm, err := p.pm.GetFSManager(poolName) + if err != nil { + log.Err(fmt.Sprintf("failed to find filesystem manager for pool %s: %v", poolName, err)) + continue + } + + results, err := fsm.GetBatchSessionState(reqs) + if err != nil { + log.Err(fmt.Sprintf("failed to get batch session state for pool %s: %v", poolName, err)) + continue + } + + for cloneID, state := range results { + batchResults[cloneID] = state + } + } + + return batchResults, nil +} + // GetPoolEntryList provides an ordered list of available pools. func (p *Provisioner) GetPoolEntryList() []models.PoolEntry { fsmList := p.pm.GetFSManagerOrderedList() @@ -604,10 +632,8 @@ func (p *Provisioner) CleanupCloneDataset(clone *models.Clone, pool string) erro return nil } - if clone.Revision == branching.DefaultRevision && !clone.HasDependent { - if err := fsm.DestroyDataset(branching.CloneDataset(pool, clone.Branch, clone.ID)); err != nil { - return fmt.Errorf("failed to destroy clone dataset: %w", err) - } + if err = fsm.DestroyClone(clone.Branch, clone.ID, clone.Revision); err != nil { + return fmt.Errorf("failed to destroy clone: %w", err) } return nil diff --git a/engine/internal/provision/mode_local_test.go b/engine/internal/provision/mode_local_test.go index 72c70e13..9183ec2a 100644 --- a/engine/internal/provision/mode_local_test.go +++ b/engine/internal/provision/mode_local_test.go @@ -102,6 +102,10 @@ func (m mockFSManager) GetSessionState(_, _ string) (*resources.SessionState, er return nil, nil } +func (m mockFSManager) GetBatchSessionState(_ []resources.SessionStateRequest) (map[string]resources.SessionState, error) { + return make(map[string]resources.SessionState), nil +} + func (m mockFSManager) GetFilesystemState() (models.FileSystem, error) { return models.FileSystem{Mode: "zfs"}, nil } @@ -214,6 +218,14 @@ func (m mockFSManager) KeepRelation(_ string) error { return nil } +func (m mockFSManager) GetDatasetOrigins(_ string) []string { + return nil +} + +func (m mockFSManager) GetActiveDatasets(_ string) ([]string, error) { + return nil, nil +} + func TestBuildPoolEntry(t *testing.T) { testCases := []struct { pool *resources.Pool diff --git a/engine/internal/provision/pool/manager.go b/engine/internal/provision/pool/manager.go index 1c63a6a2..d07fa242 100644 --- a/engine/internal/provision/pool/manager.go +++ b/engine/internal/provision/pool/manager.go @@ -39,6 +39,7 @@ type Cloner interface { // StateReporter describes methods of state reporting. 
type StateReporter interface { GetSessionState(branch, name string) (*resources.SessionState, error) + GetBatchSessionState(requests []resources.SessionStateRequest) (map[string]resources.SessionState, error) GetFilesystemState() (models.FileSystem, error) } @@ -57,7 +58,7 @@ type Branching interface { VerifyBranchMetadata() error CreateDataset(datasetName string) error CreateBranch(branchName, snapshotID string) error - DestroyDataset(branchName string) (err error) + DestroyDataset(dataset string) (err error) ListBranches() (map[string]string, error) ListAllBranches(filterPools []string) ([]models.BranchEntity, error) GetRepo() (*models.Repo, error) @@ -78,6 +79,8 @@ type Branching interface { Reset(snapshotID string, options thinclones.ResetOptions) error HasDependentEntity(snapshotName string) ([]string, error) KeepRelation(snapshotName string) error + GetDatasetOrigins(snapshotName string) []string + GetActiveDatasets(dataset string) ([]string, error) } // Pooler describes methods for Pool providing. diff --git a/engine/internal/provision/resources/resources.go b/engine/internal/provision/resources/resources.go index 1a5538ee..5892bb76 100644 --- a/engine/internal/provision/resources/resources.go +++ b/engine/internal/provision/resources/resources.go @@ -48,3 +48,9 @@ type SessionState struct { CloneDiffSize uint64 LogicalReferenced uint64 } + +// SessionStateRequest defines a request for batch session state retrieval. +type SessionStateRequest struct { + CloneID string + Branch string +} diff --git a/engine/internal/provision/thinclones/lvm/lvmanager.go b/engine/internal/provision/thinclones/lvm/lvmanager.go index 8afc4c74..c7fc0d59 100644 --- a/engine/internal/provision/thinclones/lvm/lvmanager.go +++ b/engine/internal/provision/thinclones/lvm/lvmanager.go @@ -136,6 +136,11 @@ func (m *LVManager) GetSessionState(_, _ string) (*resources.SessionState, error return &resources.SessionState{}, nil } +// GetBatchSessionState is not implemented. +func (m *LVManager) GetBatchSessionState(_ []resources.SessionStateRequest) (map[string]resources.SessionState, error) { + return make(map[string]resources.SessionState), nil +} + // GetFilesystemState is not implemented. func (m *LVManager) GetFilesystemState() (models.FileSystem, error) { // TODO(anatoly): Implement. @@ -316,3 +321,17 @@ func (m *LVManager) KeepRelation(_ string) error { return nil } + +// GetDatasetOrigins provides a list of dataset origins. +func (m *LVManager) GetDatasetOrigins(_ string) []string { + log.Msg("GetDatasetOrigins is not supported for LVM. Skip the operation") + + return nil +} + +// GetActiveDatasets provides a list of active datasets. +func (m *LVManager) GetActiveDatasets(_ string) ([]string, error) { + log.Msg("GetActiveDatasets is not supported for LVM. 
Skip the operation") + + return nil, nil +} diff --git a/engine/internal/provision/thinclones/zfs/branching.go b/engine/internal/provision/thinclones/zfs/branching.go index 601fa095..f2ed6666 100644 --- a/engine/internal/provision/thinclones/zfs/branching.go +++ b/engine/internal/provision/thinclones/zfs/branching.go @@ -267,13 +267,15 @@ func (m *Manager) ListAllBranches(poolList []string) ([]models.BranchEntity, err continue } + dataset := branching.ParseBaseDatasetFromSnapshot(fields[1]) + if !strings.Contains(fields[0], branchSep) { - branches = append(branches, models.BranchEntity{Name: fields[0], SnapshotID: fields[1]}) + branches = append(branches, models.BranchEntity{Name: fields[0], Dataset: dataset, SnapshotID: fields[1]}) continue } for _, branchName := range strings.Split(fields[0], branchSep) { - branches = append(branches, models.BranchEntity{Name: branchName, SnapshotID: fields[1]}) + branches = append(branches, models.BranchEntity{Name: branchName, Dataset: dataset, SnapshotID: fields[1]}) } } @@ -539,22 +541,6 @@ func (m *Manager) HasDependentEntity(snapshotName string) ([]string, error) { dependentClones := strings.Split(clones, ",") - // Check clones of dependent snapshots. - if child != "" { - // check all child snapshots - childList := strings.Split(child, ",") - - for _, childSnapshot := range childList { - // TODO: limit the max level of recursion. - childClones, err := m.HasDependentEntity(childSnapshot) - if err != nil { - return nil, fmt.Errorf("failed to check dependent clones of dependent snapshots: %w", err) - } - - dependentClones = append(dependentClones, childClones...) - } - } - return dependentClones, nil } diff --git a/engine/internal/provision/thinclones/zfs/zfs.go b/engine/internal/provision/thinclones/zfs/zfs.go index 4672ea64..bc55fb68 100644 --- a/engine/internal/provision/thinclones/zfs/zfs.go +++ b/engine/internal/provision/thinclones/zfs/zfs.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "fmt" "path" + "sort" "strconv" "strings" "sync" @@ -235,13 +236,27 @@ func (m *Manager) DestroyClone(branchName, cloneName string, revision int) error return nil } + cloneDataset := m.config.Pool.CloneDataset(branchName, cloneName) + cloneOrigins := m.GetDatasetOrigins(cloneDataset) + + if m.hasDependentSnapshots(cloneOrigins, cloneMountName) { + log.Msg(fmt.Sprintf("clone %q has dependent snapshot; skipping", cloneMountName)) + return nil + } + + // TODO: check pre-clone for physical mode. + if len(cloneOrigins) <= branching.MinDatasetNumber { + // There are no other revisions, so we can destroy the entire clone dataset. + cloneMountName = cloneDataset + } + // Delete the clone and all snapshots and clones depending on it. // TODO(anatoly): right now, we are using this function only for // deleting thin clones created by users. If we are going to use // this function to delete clones used during the preparation // of baseline snapshots, we need to omit `-R`, to avoid // unexpected deletion of users' clones. 
- cmd := fmt.Sprintf("zfs destroy %s", cloneMountName) + cmd := fmt.Sprintf("zfs destroy -r %s", cloneMountName) if _, err = m.runner.Run(cmd); err != nil { if strings.Contains(cloneName, "clone_pre") { @@ -254,6 +269,59 @@ func (m *Manager) DestroyClone(branchName, cloneName string, revision int) error return nil } +// GetDatasetOrigins returns the origin property of every dataset under the given clone dataset. +func (m *Manager) GetDatasetOrigins(cloneDataset string) []string { + listZfsClonesCmd := "zfs list -H -o origin -r " + cloneDataset + + out, err := m.runner.Run(listZfsClonesCmd, false) + if err != nil { + log.Warn(fmt.Sprintf("failed to check clone dataset %s: %v", cloneDataset, err)) + return nil + } + + lines := strings.Split(strings.TrimSpace(out), "\n") + + return lines +} + +// GetActiveDatasets lists snapshots in the pool that belong to the given clone dataset. +func (m *Manager) GetActiveDatasets(cloneDataset string) ([]string, error) { + listZfsClonesCmd := fmt.Sprintf("zfs list -t snapshot -H -o name -r %s | grep %s", m.config.Pool.Name, cloneDataset) + + out, err := m.runner.Run(listZfsClonesCmd, false) + if err != nil { + log.Dbg(fmt.Sprintf("no active datasets %s: %v", cloneDataset, err)) + } + + lines := strings.Split(strings.TrimSpace(out), "\n") + + datasetRegistry := make([]string, 0, len(lines)) + + for _, line := range lines { + name := strings.TrimSpace(line) + if name == "" || name == empty { + continue + } + + datasetRegistry = append(datasetRegistry, name) + } + + return datasetRegistry, nil +} + +func (m *Manager) hasDependentSnapshots(origins []string, cloneMountName string) bool { + for _, name := range origins { + if name == empty { + continue + } + + if strings.HasPrefix(name, cloneMountName) { + log.Dbg(fmt.Sprintf("%s has dependent snapshot %s", cloneMountName, name)) + return true + } + } + + return false +} + // cloneExists checks whether a ZFS clone exists. func (m *Manager) cloneExists(name string) (bool, error) { listZfsClonesCmd := "zfs list -r " + m.config.Pool.Name @@ -532,13 +600,128 @@ func (m *Manager) CleanupSnapshots(retentionLimit int) ([]string, error) { return nil, errors.Wrap(err, "failed to clean up snapshots") } + if err := m.cleanupEmptyDatasets(clonesOutput); err != nil { + return nil, fmt.Errorf("failed to clean up empty datasets: %w", err) + } + lines := strings.Split(out, "\n") m.RefreshSnapshotList() + firstSnapshotID := "" + + m.mu.Lock() + if l := len(m.snapshots); l > 0 { + firstSnapshotID = m.snapshots[l-1].ID + } + m.mu.Unlock() + + m.reviewParentProperty(firstSnapshotID) + return lines, nil } +func (m *Manager) reviewParentProperty(snapshotID string) { + if snapshotID == "" { + return + } + + parent, err := m.getProperty(parentProp, snapshotID) + if err != nil { + log.Err("failed to review parent property:", err) + + return + } + + if parent == "" { + return + } + + _, err = m.GetSnapshotProperties(parent) + if err != nil { + // Parent snapshot not found, clean up the property. 
+ if err = m.setParent("", snapshotID); err != nil { + log.Err(err) + } + } +} + +func (m *Manager) cleanupEmptyDatasets(clonesOutput string) error { + datasetsToRemove := m.getEmptyDatasets(clonesOutput) + + for _, dataset := range datasetsToRemove { + log.Dbg("Remove empty dataset: ", dataset) + + if err := m.DestroyDataset(dataset); err != nil { + return fmt.Errorf("failed to destroy dataset %s: %w", dataset, err) + } + } + + return nil +} + +func (m *Manager) getEmptyDatasets(clonesOutput string) []string { + const outputParts = 2 + + lines := strings.Split(strings.TrimSpace(clonesOutput), "\n") + + allDatasets := make(map[string]struct{}) + emptyDatasets := []string{} + + for _, line := range lines { + if line == "" { + continue + } + + parts := strings.Fields(line) + if len(parts) != outputParts { + continue + } + + dataset := parts[0] + origin := parts[1] + + // Skip branch datasets (only process clones) + //
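The new DestroyClone flow hinges on hasDependentSnapshots: before running the recursive `zfs destroy -r`, it scans the origins of all datasets under the clone and backs off if any other revision was cloned from one of its snapshots. A self-contained sketch of that check (the dataset names are illustrative; the real code obtains origins via `zfs list -H -o origin -r <cloneDataset>` and compares against its `empty` constant, which this sketch assumes to be ZFS's "-" placeholder):

```go
package main

import (
	"fmt"
	"strings"
)

// hasDependentSnapshots mirrors the check added in zfs.go: a clone dataset
// must not be destroyed if any origin under it points back beneath the
// clone's own mount name, i.e. another revision was cloned from one of its
// snapshots.
func hasDependentSnapshots(origins []string, cloneMountName string) bool {
	for _, origin := range origins {
		if origin == "-" { // ZFS prints "-" when a dataset has no origin
			continue
		}

		if strings.HasPrefix(origin, cloneMountName) {
			return true
		}
	}

	return false
}

func main() {
	// Imitates `zfs list -H -o origin -r dblab_pool/branch/main/clone001`.
	origins := []string{
		"-",
		"dblab_pool/branch/main/clone001/r0@r1", // revision r1 was cloned from r0
	}

	fmt.Println(hasDependentSnapshots(origins, "dblab_pool/branch/main/clone001"))
	// Output: true -- destroying the dataset recursively would take r1 with it.
}
```

Returning early on a dependent origin is what lets DestroyClone switch safely from a plain `zfs destroy` to the recursive `-r` form.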