diff --git a/.circleci/config.yml b/.circleci/config.yml index 34322d23d..2b3bc3a01 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ commands: parameters: docker-img: type: 'string' - default: 'docker.io/arangodb/arangodb:latest' + default: 'docker.io/arangodb/enterprise:latest' topology: type: 'string' default: 'single' @@ -109,13 +109,6 @@ commands: - run: name: Deploy to Apache Maven Central command: mvn -s .circleci/maven-release-settings.xml -Dmaven.test.skip deploy - release: - steps: - - run: - name: Release to Apache Maven Central - command: mvn -s .circleci/maven-release-settings.xml -Dmaven.test.skip nexus-staging:release - environment: - MAVEN_OPTS: "--add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.text=ALL-UNNAMED --add-opens=java.desktop/java.awt.font=ALL-UNNAMED" executors: j17: @@ -124,6 +117,9 @@ executors: j21: docker: - image: 'cimg/openjdk:21.0' + j25: + docker: + - image: 'cimg/openjdk:25.0' jobs: @@ -131,13 +127,13 @@ jobs: parameters: docker-img: type: 'string' - default: 'docker.io/arangodb/arangodb:latest' + default: 'docker.io/arangodb/enterprise:latest' topology: type: 'string' default: 'single' jdk: type: 'string' - default: 'j21' + default: 'j25' args: type: 'string' default: '' @@ -149,7 +145,7 @@ jobs: default: 'false' graalvm-version: type: 'string' - default: '21.0.2-graalce' + default: '25-graalce' resource_class: type: 'string' default: 'medium' @@ -170,6 +166,9 @@ jobs: docker-img: <> topology: <> ssl: <> + - run: + name: Start proxy + command: ./docker/start_proxy.sh - load_cache - run: name: mvn dependency:tree @@ -181,7 +180,7 @@ jobs: - run: name: Test command: | - mvn verify -am -pl test-functional -Dgpg.skip -Dmaven.javadoc.skip \ + mvn -Dorg.slf4j.simpleLogger.showDateTime verify -am -pl test-functional -Dgpg.skip -Dmaven.javadoc.skip \ -Dssl=<> \ -Dnative=<> \ <> @@ -196,13 +195,13 @@ jobs: parameters: docker-img: type: 
'string' - default: 'docker.io/arangodb/arangodb:latest' + default: 'docker.io/arangodb/enterprise:latest' topology: type: 'string' default: 'single' jdk: type: 'string' - default: 'j21' + default: 'j25' args: type: 'string' default: '' @@ -214,7 +213,7 @@ jobs: default: 'false' graalvm-version: type: 'string' - default: '21.0.2-graalce' + default: '25-graalce' resource_class: type: 'string' default: 'medium' @@ -235,6 +234,9 @@ jobs: docker-img: <> topology: <> ssl: <> + - run: + name: Start proxy + command: ./docker/start_proxy.sh - load_cache - install - run: @@ -250,7 +252,7 @@ jobs: name: Test working_directory: test-functional command: | - mvn verify -Dgpg.skip \ + mvn -Dorg.slf4j.simpleLogger.showDateTime verify -Dgpg.skip \ -Dshaded \ -Dssl=<> \ -Dnative=<> \ @@ -260,14 +262,14 @@ jobs: - store_cache test-non-func: - executor: 'j21' + executor: 'j25' steps: - timeout - checkout - setup_remote_docker - install-sdk: sdk: 'java' - version: '21.0.2-graalce' + version: '25-graalce' - start-db - load_cache - run: @@ -275,7 +277,7 @@ jobs: command: mvn dependency:tree -am -pl test-non-functional - run: name: Test - command: mvn verify -am -pl test-non-functional -Dgpg.skip -Dmaven.javadoc.skip + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -am -pl test-non-functional -Dgpg.skip -Dmaven.javadoc.skip - report: working_directory: test-non-functional - store_cache @@ -284,14 +286,14 @@ jobs: # https://issues.apache.org/jira/browse/MSHADE-206 # https://issues.apache.org/jira/browse/MNG-5899 test-non-func-shaded: - executor: 'j21' + executor: 'j25' steps: - timeout - checkout - setup_remote_docker - install-sdk: sdk: 'java' - version: '21.0.2-graalce' + version: '25-graalce' - start-db - load_cache - install @@ -302,7 +304,7 @@ jobs: - run: name: Test working_directory: test-non-functional - command: mvn verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded - 
report: working_directory: test-non-functional - store_cache @@ -321,10 +323,10 @@ jobs: key: sonar-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} - run: name: Test - command: mvn verify -am -pl test-functional -Pstatic-code-analysis -Dgpg.skip -Dmaven.javadoc.skip + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -am -pl test-functional -Pstatic-code-analysis -Dgpg.skip -Dmaven.javadoc.skip - run: name: Analyze - command: mvn verify -Pstatic-code-analysis -Dmaven.test.skip -Dgpg.skip -Dmaven.javadoc.skip org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=arangodb_arangodb-java-driver + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -Pstatic-code-analysis -Dmaven.test.skip -Dgpg.skip -Dmaven.javadoc.skip org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.projectKey=arangodb_arangodb-java-driver - save_cache: name: Save Sonar cache key: sonar-{{ .Environment.CIRCLE_JOB }}-{{ checksum "/tmp/maven_cache_seed" }} @@ -354,7 +356,7 @@ jobs: - store_cache resilience-test: - executor: 'j21' + executor: 'j25' resource_class: 'large' steps: - timeout @@ -376,7 +378,7 @@ jobs: command: mvn dependency:tree -am -pl test-resilience - run: name: Test - command: mvn verify -am -pl test-resilience -Dgpg.skip -Dmaven.javadoc.skip + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -am -pl test-resilience -Dgpg.skip -Dmaven.javadoc.skip - report: working_directory: test-resilience - store_cache @@ -385,7 +387,7 @@ jobs: # https://issues.apache.org/jira/browse/MSHADE-206 # https://issues.apache.org/jira/browse/MNG-5899 resilience-test-shaded: - executor: 'j21' + executor: 'j25' resource_class: 'large' steps: - timeout @@ -410,7 +412,7 @@ jobs: - run: name: Test working_directory: test-resilience - command: mvn verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded + command: mvn -Dorg.slf4j.simpleLogger.showDateTime verify -Dgpg.skip -Dmaven.javadoc.skip -Dshaded - report: working_directory: test-resilience 
- store_cache @@ -426,18 +428,6 @@ jobs: - deploy - store_cache - release: - executor: 'j17' - steps: - - timeout: - duration: '30m' - - checkout - - load_cache - - config_gpg - - deploy - - release - - store_cache - workflows: test-adb-version: when: @@ -448,9 +438,6 @@ workflows: matrix: parameters: docker-img: - - 'docker.io/arangodb/arangodb:3.11' - - 'docker.io/arangodb/arangodb:3.12' - - 'docker.io/arangodb/enterprise:3.11' - 'docker.io/arangodb/enterprise:3.12' topology: - 'single' @@ -461,9 +448,6 @@ workflows: matrix: parameters: docker-img: - - 'docker.io/arangodb/arangodb:3.11' - - 'docker.io/arangodb/arangodb:3.12' - - 'docker.io/arangodb/enterprise:3.11' - 'docker.io/arangodb/enterprise:3.12' topology: - 'cluster' @@ -518,34 +502,37 @@ workflows: jdk: - 'j17' - 'j21' + - 'j25' filters: tags: only: /^v.*/ branches: only: - main + - next - test: name: test-jackson-<> matrix: parameters: args: - - '-Dadb.jackson.version=2.18.0' - - '-Dadb.jackson.version=2.17.2' + - '-Dadb.jackson.version=2.20.0' + - '-Dadb.jackson.version=2.19.2' + - '-Dadb.jackson.version=2.18.4' + - '-Dadb.jackson.version=2.17.3' - '-Dadb.jackson.version=2.16.2' - '-Dadb.jackson.version=2.15.4' - '-Dadb.jackson.version=2.14.3' - '-Dadb.jackson.version=2.13.5' - '-Dadb.jackson.version=2.12.7' - - '-Dadb.jackson.version=2.11.4' - - '-Dadb.jackson.version=2.10.5' filters: tags: only: /^v.*/ branches: only: - main + - next - test: - name: test-native-ssl=<> + name: test-native-ssl=<>-<> matrix: parameters: native: @@ -556,7 +543,7 @@ workflows: - 'true' - 'false' graalvm-version: - - '22.0.1-graalce' + - '25-graalce' - '21.0.2-graalce' filters: tags: @@ -564,8 +551,9 @@ workflows: branches: only: - main + - next - test-shaded: - name: test-native-shaded-ssl=<> + name: test-native-shaded-ssl=<>-<> matrix: parameters: native: @@ -576,7 +564,7 @@ workflows: - 'true' - 'false' graalvm-version: - - '22.0.1-graalce' + - '25-graalce' - '21.0.2-graalce' filters: tags: @@ -584,21 +572,7 @@ 
workflows: branches: only: - main - - test: - name: test-activefailover-<> - matrix: - parameters: - docker-img: - - 'docker.io/arangodb/arangodb:3.11' - - 'docker.io/arangodb/enterprise:3.11' - topology: - - 'activefailover' - filters: - tags: - only: /^v.*/ - branches: - only: - - main + - next test-non-func: when: @@ -640,13 +614,3 @@ workflows: only: /^deploy.*/ branches: ignore: /.*/ - - release: - jobs: - - release: - context: java-release - filters: - tags: - only: /^release.*/ - branches: - ignore: /.*/ diff --git a/.circleci/maven-release-settings.xml b/.circleci/maven-release-settings.xml index be5b9aef8..d8e10fc5d 100644 --- a/.circleci/maven-release-settings.xml +++ b/.circleci/maven-release-settings.xml @@ -3,7 +3,7 @@ - ossrh + central true @@ -16,9 +16,9 @@ - ossrh - ${env.OSSRH_USERNAME} - ${env.OSSRH_PASSWORD} + central + ${env.CENTRAL_USERNAME} + ${env.CENTRAL_PASSWORD} diff --git a/ChangeLog.md b/ChangeLog.md index 9cad67091..a08ed86da 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -6,6 +6,91 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) a ## [Unreleased] +## [7.23.0] - 2025-11-04 + +- increased default HTTP/2 window size (#616, DE-1080) +- made HTTP/2 window size configurable (#616, DE-1080) + +## [7.22.1] - 2025-10-09 + +- added Java 25 to test matrix (#614) +- fixed native image in GraalVM 25 (#614) +- update Vert.x to version 4.5.21 (#614) +- updated Jackson to version `2.20.0` (#613) + +## [7.22.0] - 2025-08-06 + +- wildcard generic AQL bind vars (#612, DE-991) + +## [7.21.0] - 2025-07-23 + +- added SSL configuration properties (DE-1010, #611) +- fixed support to Jackson `2.19` + +## [7.20.0] - 2025-06-17 + +- added option `usePlanCache` to `AqlQueryOptions` (DE-973, #609) +- updated Jackson version to `2.19` (DE-1012, #607) + +## [7.19.0] - 2025-05-28 + +- fixed connection pool load-balancing (DE-1016, #602), now the connection pool: + - keeps track of busy connections (or busy HTTP/2 streams) + - 
enqueues new requests only to connections that are not busy (or that have available HTTP/2 streams) + - waits asynchronously if all the connections are busy (or all HTTP/2 streams are busy) +- added new option to configure HTTP/1.1 pipelining (`com.arangodb.ArangoDB.Builder.pipelining(Boolean)`), + `false` by default +- changed default configuration HTTP/1.1 pipelining to `false` + +## [7.18.0] - 2025-05-06 + +- updated `jackson-dataformat-velocypack` to version `4.6.0` +- exposed configuration properties keys in `ArangoConfigProperties` +- deprecated `CollectionStatus` +- fixed `equals()` and `hashCode()` in some entity classes + +## [7.17.1] - 2025-03-27 + +- implemented `equals()` and `hashCode()` for all entity classes +- fixed overlapping resources in shaded package + +## [7.17.0] - 2025-01-27 + +- allow construct ArangoConfigProperties from `java.util.Properties` (DE-976) +- made BaseDocument and BaseEdgeDocument serializable (#596) + +## [7.16.0] - 2025-01-09 + +- improved deserialization of `RawBytes` and `RawJson` (#592, DE-969) +- added support to Jakarta JSON-P data types (#593, DE-968) +- fixed ArangoSearch `PrimarySort` serialization + +## [7.15.0] - 2024-12-10 + +- added missing collection options (#590, DE-961) +- improved serde performances (#588, DE-959) + +## [7.14.0] - 2024-12-06 + +- support all AQL query options in `ArangoDatabase.explainAqlQuery()` (#589, ES-2266) + +## [7.13.1] - 2024-11-29 + +- tolerate error responses with text content-type (#587, DE-960) + +## [7.13.0] - 2024-11-15 + +- improved serialization and deserialization of `RawBytes` and `RawJson` (#586) + +## [7.12.0] - 2024-11-07 + +- added new method `ArangoDatabase.explainAqlQuery()`, supporting arbitrary JSON-like response data +- deprecated `ArangoDatabase.explainQuery()` + +## [7.11.0] - 2024-10-31 + +- added support to HTTP proxies (#584, DE-930) + ## [7.10.0] - 2024-10-22 - udpated Jackson to version `2.18` (#581, DE-877) diff --git a/README.md b/README.md index 
2c4df88db..86ab53472 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,5 @@ The official [ArangoDB](https://www.arangodb.com/) Java Driver. - [ChangeLog](ChangeLog.md) - [Examples](test-non-functional/src/test/java/example) -- [Tutorial](https://university.arangodb.com/courses/java-driver-tutorial-v7/) -- [Documentation](https://docs.arangodb.com/stable/develop/drivers/java/) +- [Documentation and Tutorial](https://docs.arangodb.com/stable/develop/drivers/java/) - [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) diff --git a/core/pom.xml b/core/pom.xml index 4eace9bdd..088f934d0 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 core @@ -17,7 +16,6 @@ com.arangodb.core - false @@ -41,6 +39,11 @@ jackson-annotations compile + + com.fasterxml.jackson.datatype + jackson-datatype-jakarta-jsonp + compile + com.google.code.findbugs jsr305 diff --git a/core/src/main/java/com/arangodb/ArangoDB.java b/core/src/main/java/com/arangodb/ArangoDB.java index ea822d89e..3a5635cb0 100644 --- a/core/src/main/java/com/arangodb/ArangoDB.java +++ b/core/src/main/java/com/arangodb/ArangoDB.java @@ -356,6 +356,7 @@ public interface ArangoDB extends ArangoSerdeAccessor { /** * Reset the server log levels * Revert the server's log level settings to the values they had at startup, as determined by the startup options specified on the command-line, a configuration file, and the factory defaults. 
+ * * @since ArangoDB 3.12 */ LogLevelEntity resetLogLevels(LogLevelOptions options); @@ -484,6 +485,39 @@ public Builder useSsl(final Boolean useSsl) { return this; } + /** + * Sets the SSL certificate value as Base64 encoded String + * + * @param sslCertValue the SSL certificate value as Base64 encoded String + * @return {@link ArangoDB.Builder} + */ + public Builder sslCertValue(final String sslCertValue) { + config.setSslCertValue(sslCertValue); + return this; + } + + /** + * Sets the SSL Trust manager algorithm + * + * @param sslAlgorithm the name of the SSL Trust manager algorithm + * @return {@link ArangoDB.Builder} + */ + public Builder sslAlgorithm(final String sslAlgorithm) { + config.setSslAlgorithm(sslAlgorithm); + return this; + } + + /** + * Sets the SSLContext protocol, default: {@code TLS} + * + * @param sslProtocol the name of the SSLContext protocol + * @return {@link ArangoDB.Builder} + */ + public Builder sslProtocol(final String sslProtocol) { + config.setSslProtocol(sslProtocol); + return this; + } + /** * Sets the SSL context to be used when {@code true} is passed through {@link #useSsl(Boolean)}. * @@ -517,6 +551,39 @@ public Builder chunkSize(final Integer chunkSize) { return this; } + /** + * Set whether to use requests pipelining in HTTP/1.1 ({@link Protocol#HTTP_JSON} or {@link Protocol#HTTP_VPACK}). + * + * @param pipelining {@code true} if enabled + * @return {@link ArangoDB.Builder} + */ + public Builder pipelining(final Boolean pipelining) { + config.setPipelining(pipelining); + return this; + } + + /** + * Sets the size of the connection window for HTTP/2. + * + * @param connectionWindowSize size in bytes + * @return {@link ArangoDB.Builder} + */ + public Builder connectionWindowSize(final Integer connectionWindowSize) { + config.setConnectionWindowSize(connectionWindowSize); + return this; + } + + /** + * Sets the initial window size for HTTP/2 streams. 
+ * + * @param initialWindowSize size in bytes + * @return {@link ArangoDB.Builder} + */ + public Builder initialWindowSize(final Integer initialWindowSize) { + config.setInitialWindowSize(initialWindowSize); + return this; + } + /** * Sets the maximum number of connections the built in connection pool will open per host. * @@ -528,6 +595,8 @@ public Builder chunkSize(final Integer chunkSize) { * {@link Protocol#VST} == 1 * {@link Protocol#HTTP_JSON} == 20 * {@link Protocol#HTTP_VPACK} == 20 + * {@link Protocol#HTTP2_JSON} == 1 + * {@link Protocol#HTTP2_VPACK} == 1 * * * @param maxConnections max number of connections @@ -703,6 +772,7 @@ public Builder compressionLevel(Integer level) { /** * Configuration specific for {@link com.arangodb.internal.net.ProtocolProvider}. + * * @return {@link ArangoDB.Builder} */ public Builder protocolConfig(ProtocolConfig protocolConfig) { diff --git a/core/src/main/java/com/arangodb/ArangoDatabase.java b/core/src/main/java/com/arangodb/ArangoDatabase.java index 1c7653360..4af6cee38 100644 --- a/core/src/main/java/com/arangodb/ArangoDatabase.java +++ b/core/src/main/java/com/arangodb/ArangoDatabase.java @@ -69,8 +69,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Returns the name of the used storage engine. 
* * @return the storage engine name - * @see - * API + * @see API * Documentation */ ArangoDBEngine getEngine(); @@ -79,8 +78,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Checks whether the database exists * * @return true if the database exists, otherwise false - * @see - * API + * @see API * Documentation */ boolean exists(); @@ -149,8 +147,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * * @param id The index-handle * @return information about the index - * @see - * API Documentation + * @see API Documentation */ IndexEntity getIndex(String id); @@ -159,8 +156,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * * @param id The index-handle * @return the id of the index - * @see - * API Documentation + * @see API Documentation */ String deleteIndex(String id); @@ -229,9 +225,9 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * * @param user The name of the user * @param permissions The permissions the user grant - * @since ArangoDB 3.2.0 * @see * API Documentation + * @since ArangoDB 3.2.0 */ void grantDefaultCollectionAccess(String user, Permissions permissions); @@ -255,11 +251,10 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * @param bindVars key/value pairs defining the variables to bind the query to * @param options Additional options that will be passed to the query API, can be null * @return cursor of the results - * @see - * API + * @see API * Documentation */ - ArangoCursor query(String query, Class type, Map bindVars, AqlQueryOptions options); + ArangoCursor query(String query, Class type, Map bindVars, AqlQueryOptions options); /** * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the @@ -269,8 +264,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) * @param options Additional options that will be 
passed to the query API, can be null * @return cursor of the results - * @see - * API + * @see API * Documentation */ ArangoCursor query(String query, Class type, AqlQueryOptions options); @@ -283,11 +277,10 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) * @param bindVars key/value pairs defining the variables to bind the query to * @return cursor of the results - * @see - * API + * @see API * Documentation */ - ArangoCursor query(String query, Class type, Map bindVars); + ArangoCursor query(String query, Class type, Map bindVars); /** * Performs a database query using the given {@code query}, then returns a new {@code ArangoCursor} instance for the @@ -296,8 +289,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * @param query An AQL query string * @param type The type of the result (POJO or {@link com.arangodb.util.RawData}) * @return cursor of the results - * @see - * API + * @see API * Documentation */ ArangoCursor query(String query, Class type); @@ -354,6 +346,35 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { */ ArangoCursor cursor(String cursorId, Class type, String nextBatchId, AqlQueryOptions options); + /** + * Explain an AQL query and return information about it + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind parameters + * @param options Additional options, can be null + * @return information about the query + * @see API + * Documentation + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, AqlQueryExplainOptions)} instead + */ + @Deprecated + AqlExecutionExplainEntity explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * Explain an AQL query and return information about it + * + * @param query the query which you want explained + * @param bindVars key/value pairs representing the bind 
parameters + * @param options Additional options, can be null + * @return information about the query + * @see API + * Documentation + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ + @Deprecated + AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** * Explain an AQL query and return information about it * @@ -364,7 +385,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * @see API * Documentation */ - AqlExecutionExplainEntity explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options); /** * Parse an AQL query and return information about it This method is for query validation only. To actually query @@ -560,8 +581,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * * @param options Additional options, can be null * @return information about the transaction - * @see - * API + * @see API * Documentation * @since ArangoDB 3.5.0 */ @@ -571,8 +591,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Aborts a Stream Transaction. * * @return information about the transaction - * @see - * API + * @see API * Documentation */ StreamTransactionEntity abortStreamTransaction(String id); @@ -581,8 +600,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Gets information about a Stream Transaction. * * @return information about the transaction - * @see - * + * @see * API Documentation * @since ArangoDB 3.5.0 */ @@ -592,8 +610,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Gets all the currently running Stream Transactions. 
* * @return all the currently running Stream Transactions - * @see - * + * @see * API Documentation * @since ArangoDB 3.5.0 */ @@ -603,8 +620,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Commits a Stream Transaction. * * @return information about the transaction - * @see - * + * @see * API Documentation * @since ArangoDB 3.5.0 */ @@ -633,8 +649,7 @@ public interface ArangoDatabase extends ArangoSerdeAccessor { * Fetches all views from the database and returns a list of view descriptions. * * @return list of information about all views - * @see - * API Documentation + * @see API Documentation * @since ArangoDB 3.4.0 */ Collection getViews(); diff --git a/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java index 6f4c2a366..41b2e34d6 100644 --- a/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java +++ b/core/src/main/java/com/arangodb/ArangoDatabaseAsync.java @@ -146,11 +146,11 @@ public interface ArangoDatabaseAsync extends ArangoSerdeAccessor { */ CompletableFuture getPermissions(String user); - CompletableFuture> query(String query, Class type, Map bindVars, AqlQueryOptions options); + CompletableFuture> query(String query, Class type, Map bindVars, AqlQueryOptions options); CompletableFuture> query(String query, Class type, AqlQueryOptions options); - CompletableFuture> query(String query, Class type, Map bindVars); + CompletableFuture> query(String query, Class type, Map bindVars); CompletableFuture> query(String query, Class type); @@ -164,8 +164,24 @@ public interface ArangoDatabaseAsync extends ArangoSerdeAccessor { /** * Asynchronous version of {@link ArangoDatabase#explainQuery(String, Map, AqlQueryExplainOptions)} + * + * @deprecated for removal, use {@link ArangoDatabaseAsync#explainAqlQuery(String, Map, AqlQueryExplainOptions)} instead + */ + @Deprecated + CompletableFuture explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * 
Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, AqlQueryExplainOptions)} + * + * @deprecated for removal, use {@link ArangoDatabaseAsync#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead + */ + @Deprecated + CompletableFuture explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options); + + /** + * Asynchronous version of {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} */ - CompletableFuture explainQuery(String query, Map bindVars, AqlQueryExplainOptions options); + CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options); /** * Asynchronous version of {@link ArangoDatabase#parseQuery(String)} diff --git a/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java b/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java index 774b3c31d..cd48ae6a9 100644 --- a/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java +++ b/core/src/main/java/com/arangodb/config/ArangoConfigProperties.java @@ -7,9 +7,39 @@ import java.util.List; import java.util.Optional; +import java.util.Properties; public interface ArangoConfigProperties { + //region configuration properties keys + String KEY_HOSTS = "hosts"; + String KEY_PROTOCOL = "protocol"; + String KEY_USER = "user"; + String KEY_PASSWORD = "password"; + String KEY_JWT = "jwt"; + String KEY_TIMEOUT = "timeout"; + String KEY_USE_SSL = "useSsl"; + String KEY_SSL_CERT_VALUE = "sslCertValue"; + String KEY_SSL_ALGORITHM = "sslAlgorithm"; + String KEY_SSL_PROTOCOL = "sslProtocol"; + String KEY_VERIFY_HOST = "verifyHost"; + String KEY_CHUNK_SIZE = "chunkSize"; + String KEY_PIPELINING = "pipelining"; + String KEY_CONNECTION_WINDOW_SIZE = "connectionWindowSize"; + String KEY_INITIAL_WINDOW_SIZE = "initialWindowSize"; + String KEY_MAX_CONNECTIONS = "maxConnections"; + String KEY_CONNECTION_TTL = "connectionTtl"; + String KEY_KEEP_ALIVE_INTERVAL = "keepAliveInterval"; + String 
KEY_ACQUIRE_HOST_LIST = "acquireHostList"; + String KEY_ACQUIRE_HOST_LIST_INTERVAL = "acquireHostListInterval"; + String KEY_LOAD_BALANCING_STRATEGY = "loadBalancingStrategy"; + String KEY_RESPONSE_QUEUE_TIME_SAMPLES = "responseQueueTimeSamples"; + String KEY_COMPRESSION = "compression"; + String KEY_COMPRESSION_THRESHOLD = "compressionThreshold"; + String KEY_COMPRESSION_LEVEL = "compressionLevel"; + String KEY_SERDE_PROVIDER_CLASS = "serdeProviderClass"; + //endregion + /** * Reads properties from file arangodb.properties. * Properties must be prefixed with @{code "arangodb"}, eg. @{code "arangodb.hosts=localhost:8529"}. @@ -34,6 +64,22 @@ static ArangoConfigProperties fromFile(final String fileName, final String prefi return new ArangoConfigPropertiesImpl(fileName, prefix); } + /** + * Creates {@code ArangoConfigProperties} from Java properties ({@link java.util.Properties}). + * Properties must be prefixed with @{code "arangodb"}, eg. @{code "arangodb.hosts=localhost:8529"}. + */ + static ArangoConfigProperties fromProperties(final Properties properties) { + return new ArangoConfigPropertiesImpl(properties); + } + + /** + * Creates {@code ArangoConfigProperties} from Java properties ({@link java.util.Properties}). + * Properties must be prefixed with @{code prefix}, eg. @{code ".hosts=localhost:8529"}. 
+ */ + static ArangoConfigProperties fromProperties(final Properties properties, final String prefix) { + return new ArangoConfigPropertiesImpl(properties, prefix); + } + default Optional> getHosts() { return Optional.empty(); } @@ -62,6 +108,18 @@ default Optional getUseSsl() { return Optional.empty(); } + default Optional getSslCertValue() { + return Optional.empty(); + } + + default Optional getSslAlgorithm() { + return Optional.empty(); + } + + default Optional getSslProtocol() { + return Optional.empty(); + } + default Optional getVerifyHost() { return Optional.empty(); } @@ -70,6 +128,18 @@ default Optional getChunkSize() { return Optional.empty(); } + default Optional getPipelining() { + return Optional.empty(); + } + + default Optional getConnectionWindowSize() { + return Optional.empty(); + } + + default Optional getInitialWindowSize() { + return Optional.empty(); + } + default Optional getMaxConnections() { return Optional.empty(); } diff --git a/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java b/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java index 2bd5d4982..5019834c8 100644 --- a/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java +++ b/core/src/main/java/com/arangodb/entity/AbstractBaseDocument.java @@ -26,6 +26,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; +import java.io.Serializable; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -35,7 +36,9 @@ * @author Mark Vollmary * @author Michele Rastelli */ -abstract class AbstractBaseDocument { +abstract class AbstractBaseDocument implements Serializable { + + private static final long serialVersionUID = 6985324876843525239L; private static final String[] META_PROPS = new String[]{ DocumentFields.ID, diff --git a/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java index 
1b2dece59..eb56fc74b 100644 --- a/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java +++ b/core/src/main/java/com/arangodb/entity/AqlExecutionExplainEntity.java @@ -20,11 +20,18 @@ package com.arangodb.entity; +import com.arangodb.ArangoDatabase; +import com.arangodb.model.ExplainAqlQueryOptions; + import java.util.Collection; +import java.util.Map; +import java.util.Objects; /** * @author Mark Vollmary + * @deprecated for removal, use {@link ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead */ +@Deprecated public final class AqlExecutionExplainEntity { private ExecutionPlan plan; @@ -53,6 +60,18 @@ public Boolean getCacheable() { return cacheable; } + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlExecutionExplainEntity)) return false; + AqlExecutionExplainEntity that = (AqlExecutionExplainEntity) o; + return Objects.equals(plan, that.plan) && Objects.equals(plans, that.plans) && Objects.equals(warnings, that.warnings) && Objects.equals(stats, that.stats) && Objects.equals(cacheable, that.cacheable); + } + + @Override + public int hashCode() { + return Objects.hash(plan, plans, warnings, stats, cacheable); + } + public static final class ExecutionPlan { private Collection nodes; private Collection rules; @@ -84,6 +103,18 @@ public Integer getEstimatedCost() { public Integer getEstimatedNrItems() { return estimatedNrItems; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionPlan)) return false; + ExecutionPlan that = (ExecutionPlan) o; + return Objects.equals(nodes, that.nodes) && Objects.equals(rules, that.rules) && Objects.equals(collections, that.collections) && Objects.equals(variables, that.variables) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(estimatedNrItems, that.estimatedNrItems); + } + + @Override + public int hashCode() { + return Objects.hash(nodes, rules, collections, variables, estimatedCost, estimatedNrItems); + } } public 
static final class ExecutionNode { @@ -202,6 +233,18 @@ public ExecutionCollection getCondition() { public Boolean getReverse() { return reverse; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionNode)) return false; + ExecutionNode that = (ExecutionNode) o; + return Objects.equals(type, that.type) && Objects.equals(dependencies, that.dependencies) && Objects.equals(id, that.id) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(estimatedNrItems, that.estimatedNrItems) && Objects.equals(depth, that.depth) && Objects.equals(database, that.database) && Objects.equals(collection, that.collection) && Objects.equals(inVariable, that.inVariable) && Objects.equals(outVariable, that.outVariable) && Objects.equals(conditionVariable, that.conditionVariable) && Objects.equals(random, that.random) && Objects.equals(offset, that.offset) && Objects.equals(limit, that.limit) && Objects.equals(fullCount, that.fullCount) && Objects.equals(subquery, that.subquery) && Objects.equals(isConst, that.isConst) && Objects.equals(canThrow, that.canThrow) && Objects.equals(expressionType, that.expressionType) && Objects.equals(indexes, that.indexes) && Objects.equals(expression, that.expression) && Objects.equals(condition, that.condition) && Objects.equals(reverse, that.reverse); + } + + @Override + public int hashCode() { + return Objects.hash(type, dependencies, id, estimatedCost, estimatedNrItems, depth, database, collection, inVariable, outVariable, conditionVariable, random, offset, limit, fullCount, subquery, isConst, canThrow, expressionType, indexes, expression, condition, reverse); + } } public static final class ExecutionVariable { @@ -215,6 +258,18 @@ public Long getId() { public String getName() { return name; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionVariable)) return false; + ExecutionVariable that = (ExecutionVariable) o; + return Objects.equals(id, that.id) && Objects.equals(name, 
that.name); + } + + @Override + public int hashCode() { + return Objects.hash(id, name); + } } public static final class ExecutionExpression { @@ -258,6 +313,18 @@ public Collection getLevels() { public Collection getSubNodes() { return subNodes; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionExpression)) return false; + ExecutionExpression that = (ExecutionExpression) o; + return Objects.equals(type, that.type) && Objects.equals(name, that.name) && Objects.equals(id, that.id) && Objects.equals(value, that.value) && Objects.equals(sorted, that.sorted) && Objects.equals(quantifier, that.quantifier) && Objects.equals(levels, that.levels) && Objects.equals(subNodes, that.subNodes); + } + + @Override + public int hashCode() { + return Objects.hash(type, name, id, value, sorted, quantifier, levels, subNodes); + } } public static final class ExecutionCollection { @@ -271,6 +338,18 @@ public String getName() { public String getType() { return type; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionCollection)) return false; + ExecutionCollection that = (ExecutionCollection) o; + return Objects.equals(name, that.name) && Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } } public static final class ExecutionStats { @@ -299,6 +378,18 @@ public Long getPeakMemoryUsage() { public Double getExecutionTime() { return executionTime; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionStats)) return false; + ExecutionStats that = (ExecutionStats) o; + return Objects.equals(rulesExecuted, that.rulesExecuted) && Objects.equals(rulesSkipped, that.rulesSkipped) && Objects.equals(plansCreated, that.plansCreated) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && Objects.equals(executionTime, that.executionTime); + } + + @Override + public int hashCode() { + return Objects.hash(rulesExecuted, rulesSkipped, 
plansCreated, peakMemoryUsage, executionTime); + } } } diff --git a/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java b/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java index 81026a26d..78ff58921 100644 --- a/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java +++ b/core/src/main/java/com/arangodb/entity/AqlFunctionEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -56,4 +58,15 @@ public Boolean getIsDeterministic() { return isDeterministic; } + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlFunctionEntity)) return false; + AqlFunctionEntity that = (AqlFunctionEntity) o; + return Objects.equals(name, that.name) && Objects.equals(code, that.code) && Objects.equals(isDeterministic, that.isDeterministic); + } + + @Override + public int hashCode() { + return Objects.hash(name, code, isDeterministic); + } } diff --git a/core/src/main/java/com/arangodb/entity/AqlParseEntity.java b/core/src/main/java/com/arangodb/entity/AqlParseEntity.java index 15bcc82e1..3dd7bf9ac 100644 --- a/core/src/main/java/com/arangodb/entity/AqlParseEntity.java +++ b/core/src/main/java/com/arangodb/entity/AqlParseEntity.java @@ -21,6 +21,7 @@ package com.arangodb.entity; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -43,6 +44,18 @@ public Collection getAst() { return ast; } + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlParseEntity)) return false; + AqlParseEntity that = (AqlParseEntity) o; + return Objects.equals(collections, that.collections) && Objects.equals(bindVars, that.bindVars) && Objects.equals(ast, that.ast); + } + + @Override + public int hashCode() { + return Objects.hash(collections, bindVars, ast); + } + public static final class AstNode { private String type; private Collection subNodes; @@ -70,6 +83,17 @@ public Object getValue() { return value; } + @Override + public boolean 
equals(Object o) { + if (!(o instanceof AstNode)) return false; + AstNode astNode = (AstNode) o; + return Objects.equals(type, astNode.type) && Objects.equals(subNodes, astNode.subNodes) && Objects.equals(name, astNode.name) && Objects.equals(id, astNode.id) && Objects.equals(value, astNode.value); + } + + @Override + public int hashCode() { + return Objects.hash(type, subNodes, name, id, value); + } } } diff --git a/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java b/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java new file mode 100644 index 000000000..c4eb9ea22 --- /dev/null +++ b/core/src/main/java/com/arangodb/entity/AqlQueryExplainEntity.java @@ -0,0 +1,220 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.entity; + +import com.fasterxml.jackson.annotation.JsonAnySetter; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public final class AqlQueryExplainEntity { + + private ExecutionPlan plan; + private Collection plans; + private Collection warnings; + private ExecutionStats stats; + private Boolean cacheable; + + public ExecutionPlan getPlan() { + return plan; + } + + public Collection getPlans() { + return plans; + } + + public Collection getWarnings() { + return warnings; + } + + public ExecutionStats getStats() { + return stats; + } + + public Boolean getCacheable() { + return cacheable; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof AqlQueryExplainEntity)) return false; + AqlQueryExplainEntity that = (AqlQueryExplainEntity) o; + return Objects.equals(plan, that.plan) && Objects.equals(plans, that.plans) && Objects.equals(warnings, that.warnings) && Objects.equals(stats, that.stats) && Objects.equals(cacheable, that.cacheable); + } + + @Override + public int hashCode() { + return Objects.hash(plan, plans, warnings, stats, cacheable); + } + + public static final class ExecutionPlan { + private final Map properties = new HashMap<>(); + private Collection nodes; + private Double estimatedCost; + private Collection collections; + private Collection rules; + private Collection variables; + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + public Collection getNodes() { + return nodes; + } + + public Double getEstimatedCost() { + return estimatedCost; + } + + public Collection getCollections() { + return collections; + } + + public Collection getRules() { + return rules; + } + + public Collection getVariables() { + return variables; + } + + @Override + public boolean 
equals(Object o) { + if (!(o instanceof ExecutionPlan)) return false; + ExecutionPlan that = (ExecutionPlan) o; + return Objects.equals(properties, that.properties) && Objects.equals(nodes, that.nodes) && Objects.equals(estimatedCost, that.estimatedCost) && Objects.equals(collections, that.collections) && Objects.equals(rules, that.rules) && Objects.equals(variables, that.variables); + } + + @Override + public int hashCode() { + return Objects.hash(properties, nodes, estimatedCost, collections, rules, variables); + } + } + + public static final class ExecutionNode { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionNode)) return false; + ExecutionNode that = (ExecutionNode) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionVariable { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionVariable)) return false; + ExecutionVariable that = (ExecutionVariable) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionCollection { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof 
ExecutionCollection)) return false; + ExecutionCollection that = (ExecutionCollection) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + + public static final class ExecutionStats { + private final Map properties = new HashMap<>(); + + @JsonAnySetter + public void add(String key, Object value) { + properties.put(key, value); + } + + public Object get(String key) { + return properties.get(key); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ExecutionStats)) return false; + ExecutionStats that = (ExecutionStats) o; + return Objects.equals(properties, that.properties); + } + + @Override + public int hashCode() { + return Objects.hashCode(properties); + } + } + +} diff --git a/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java b/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java index 3318c3d7c..30f811800 100644 --- a/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java +++ b/core/src/main/java/com/arangodb/entity/ArangoDBEngine.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Michele Rastelli */ @@ -38,6 +40,18 @@ public StorageEngineName getName() { return name; } + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoDBEngine)) return false; + ArangoDBEngine that = (ArangoDBEngine) o; + return name == that.name; + } + + @Override + public int hashCode() { + return Objects.hashCode(name); + } + public enum StorageEngineName { mmfiles, rocksdb } diff --git a/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java b/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java index d67e5ab4b..6fd696166 100644 --- a/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java +++ b/core/src/main/java/com/arangodb/entity/ArangoDBVersion.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -55,4 
+57,15 @@ public License getLicense() { return license; } + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoDBVersion)) return false; + ArangoDBVersion that = (ArangoDBVersion) o; + return Objects.equals(server, that.server) && Objects.equals(version, that.version) && license == that.license; + } + + @Override + public int hashCode() { + return Objects.hash(server, version, license); + } } \ No newline at end of file diff --git a/core/src/main/java/com/arangodb/entity/CollectionEntity.java b/core/src/main/java/com/arangodb/entity/CollectionEntity.java index 43eef1d17..45ddeaf12 100644 --- a/core/src/main/java/com/arangodb/entity/CollectionEntity.java +++ b/core/src/main/java/com/arangodb/entity/CollectionEntity.java @@ -24,6 +24,7 @@ import com.arangodb.model.ComputedValue; import java.util.List; +import java.util.Objects; /** * @author Mark Vollmary @@ -59,6 +60,7 @@ public Boolean getIsSystem() { return isSystem; } + @Deprecated public CollectionStatus getStatus() { return status; } @@ -83,4 +85,15 @@ public List getComputedValues() { return computedValues; } + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionEntity)) return false; + CollectionEntity that = (CollectionEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(waitForSync, that.waitForSync) && Objects.equals(isSystem, that.isSystem) && status == that.status && type == that.type && Objects.equals(schema, that.schema) && Objects.equals(computedValues, that.computedValues); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, waitForSync, isSystem, status, type, schema, computedValues); + } } diff --git a/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java index cba0f366a..8f5076639 100644 --- a/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java +++ 
b/core/src/main/java/com/arangodb/entity/CollectionPropertiesEntity.java @@ -21,78 +21,93 @@ package com.arangodb.entity; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary */ public final class CollectionPropertiesEntity extends CollectionEntity { + private Boolean cacheEnabled; + private String distributeShardsLike; + private Boolean isDisjoint; + private Boolean isSmart; private KeyOptions keyOptions; - private Long count; private Integer numberOfShards; - private Collection shardKeys; private ReplicationFactor replicationFactor; - private Integer writeConcern; + private Collection shardKeys; private String shardingStrategy; // cluster option + private String smartGraphAttribute; private String smartJoinAttribute; // enterprise option + private Integer writeConcern; + private Long count; public CollectionPropertiesEntity() { super(); } - public KeyOptions getKeyOptions() { - return keyOptions; + public Boolean getCacheEnabled() { + return cacheEnabled; } - public void setKeyOptions(final KeyOptions keyOptions) { - this.keyOptions = keyOptions; + public void setCacheEnabled(Boolean cacheEnabled) { + this.cacheEnabled = cacheEnabled; } - public Long getCount() { - return count; + public String getDistributeShardsLike() { + return distributeShardsLike; } - public void setCount(final Long count) { - this.count = count; + public void setDistributeShardsLike(String distributeShardsLike) { + this.distributeShardsLike = distributeShardsLike; } - /** - * @return the number of shards of the collection. Only in a cluster setup (else returning null). 
- */ - public Integer getNumberOfShards() { - return numberOfShards; + public Boolean getDisjoint() { + return isDisjoint; } - public void setNumberOfShards(final Integer numberOfShards) { - this.numberOfShards = numberOfShards; + public void setDisjoint(Boolean disjoint) { + isDisjoint = disjoint; } - /** - * @return the names of document attributes that are used to determine the target shard for documents. - * Only in a cluster setup (else returning null). - */ - public Collection getShardKeys() { - return shardKeys; + public Boolean getSmart() { + return isSmart; } - public void setShardKeys(final Collection shardKeys) { - this.shardKeys = shardKeys; + public void setSmart(Boolean smart) { + isSmart = smart; + } + + public KeyOptions getKeyOptions() { + return keyOptions; + } + + public void setKeyOptions(KeyOptions keyOptions) { + this.keyOptions = keyOptions; + } + + public Integer getNumberOfShards() { + return numberOfShards; + } + + public void setNumberOfShards(Integer numberOfShards) { + this.numberOfShards = numberOfShards; } public ReplicationFactor getReplicationFactor() { return replicationFactor; } - public void setReplicationFactor(final ReplicationFactor replicationFactor) { + public void setReplicationFactor(ReplicationFactor replicationFactor) { this.replicationFactor = replicationFactor; } - public Integer getWriteConcern() { - return writeConcern; + public Collection getShardKeys() { + return shardKeys; } - public void setWriteConcern(final Integer writeConcern) { - this.writeConcern = writeConcern; + public void setShardKeys(Collection shardKeys) { + this.shardKeys = shardKeys; } public String getShardingStrategy() { @@ -103,6 +118,14 @@ public void setShardingStrategy(String shardingStrategy) { this.shardingStrategy = shardingStrategy; } + public String getSmartGraphAttribute() { + return smartGraphAttribute; + } + + public void setSmartGraphAttribute(String smartGraphAttribute) { + this.smartGraphAttribute = smartGraphAttribute; + } + public 
String getSmartJoinAttribute() { return smartJoinAttribute; } @@ -111,4 +134,32 @@ public void setSmartJoinAttribute(String smartJoinAttribute) { this.smartJoinAttribute = smartJoinAttribute; } + public Integer getWriteConcern() { + return writeConcern; + } + + public void setWriteConcern(Integer writeConcern) { + this.writeConcern = writeConcern; + } + + public Long getCount() { + return count; + } + + public void setCount(Long count) { + this.count = count; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionPropertiesEntity)) return false; + if (!super.equals(o)) return false; + CollectionPropertiesEntity that = (CollectionPropertiesEntity) o; + return Objects.equals(cacheEnabled, that.cacheEnabled) && Objects.equals(distributeShardsLike, that.distributeShardsLike) && Objects.equals(isDisjoint, that.isDisjoint) && Objects.equals(isSmart, that.isSmart) && Objects.equals(keyOptions, that.keyOptions) && Objects.equals(numberOfShards, that.numberOfShards) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(shardKeys, that.shardKeys) && Objects.equals(shardingStrategy, that.shardingStrategy) && Objects.equals(smartGraphAttribute, that.smartGraphAttribute) && Objects.equals(smartJoinAttribute, that.smartJoinAttribute) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(count, that.count); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cacheEnabled, distributeShardsLike, isDisjoint, isSmart, keyOptions, numberOfShards, replicationFactor, shardKeys, shardingStrategy, smartGraphAttribute, smartJoinAttribute, writeConcern, count); + } } diff --git a/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java b/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java index 6e90e456a..02e8e2ae3 100644 --- a/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java +++ 
b/core/src/main/java/com/arangodb/entity/CollectionRevisionEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -31,4 +33,16 @@ public String getRevision() { return revision; } + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionRevisionEntity)) return false; + if (!super.equals(o)) return false; + CollectionRevisionEntity that = (CollectionRevisionEntity) o; + return Objects.equals(revision, that.revision); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), revision); + } } diff --git a/core/src/main/java/com/arangodb/entity/CollectionStatus.java b/core/src/main/java/com/arangodb/entity/CollectionStatus.java index b3d30f5d6..39b7863b9 100644 --- a/core/src/main/java/com/arangodb/entity/CollectionStatus.java +++ b/core/src/main/java/com/arangodb/entity/CollectionStatus.java @@ -23,6 +23,7 @@ /** * @author Mark Vollmary */ +@Deprecated public enum CollectionStatus { LOADED(3), DELETED(5); diff --git a/core/src/main/java/com/arangodb/entity/CursorEntity.java b/core/src/main/java/com/arangodb/entity/CursorEntity.java index 71b34f31d..6070ddc1a 100644 --- a/core/src/main/java/com/arangodb/entity/CursorEntity.java +++ b/core/src/main/java/com/arangodb/entity/CursorEntity.java @@ -25,6 +25,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * @author Mark Vollmary @@ -110,6 +111,18 @@ public String getNextBatchId() { return nextBatchId; } + @Override + public boolean equals(Object o) { + if (!(o instanceof CursorEntity)) return false; + CursorEntity that = (CursorEntity) o; + return Objects.equals(id, that.id) && Objects.equals(count, that.count) && Objects.equals(cached, that.cached) && Objects.equals(hasMore, that.hasMore) && Objects.equals(result, that.result) && Objects.equals(potentialDirtyRead, that.potentialDirtyRead) && Objects.equals(nextBatchId, that.nextBatchId) && 
Objects.equals(extra, that.extra); + } + + @Override + public int hashCode() { + return Objects.hash(id, count, cached, hasMore, result, potentialDirtyRead, nextBatchId, extra); + } + public static final class Extras { private final Collection warnings = Collections.emptyList(); private CursorStats stats; @@ -121,6 +134,18 @@ public CursorStats getStats() { public Collection getWarnings() { return warnings; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Extras)) return false; + Extras extras = (Extras) o; + return Objects.equals(warnings, extras.warnings) && Objects.equals(stats, extras.stats); + } + + @Override + public int hashCode() { + return Objects.hash(warnings, stats); + } } } diff --git a/core/src/main/java/com/arangodb/entity/CursorStats.java b/core/src/main/java/com/arangodb/entity/CursorStats.java index 8f432b6e2..2d5ce96a3 100644 --- a/core/src/main/java/com/arangodb/entity/CursorStats.java +++ b/core/src/main/java/com/arangodb/entity/CursorStats.java @@ -4,6 +4,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Objects; public final class CursorStats { private final Map properties = new HashMap<>(); @@ -146,6 +147,11 @@ public Long getPeakMemoryUsage() { return peakMemoryUsage; } + /** + * @return The number of real document lookups caused by late materialization as well as `IndexNode`s that had to + * load document attributes not covered by the index. This is how many documents had to be fetched from storage + * after an index scan that initially covered the attribute access for these documents. + */ public Integer getDocumentLookups() { return documentLookups; } @@ -160,7 +166,22 @@ public Integer getIntermediateCommits() { return intermediateCommits; } + /** + * @return The number of seek calls done by RocksDB iterators for merge joins (`JoinNode` in the execution plan). 
+ */ public Integer getSeeks() { return seeks; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof CursorStats)) return false; + CursorStats that = (CursorStats) o; + return Objects.equals(properties, that.properties) && Objects.equals(writesExecuted, that.writesExecuted) && Objects.equals(writesIgnored, that.writesIgnored) && Objects.equals(scannedFull, that.scannedFull) && Objects.equals(scannedIndex, that.scannedIndex) && Objects.equals(cursorsCreated, that.cursorsCreated) && Objects.equals(cursorsRearmed, that.cursorsRearmed) && Objects.equals(cacheHits, that.cacheHits) && Objects.equals(cacheMisses, that.cacheMisses) && Objects.equals(filtered, that.filtered) && Objects.equals(httpRequests, that.httpRequests) && Objects.equals(fullCount, that.fullCount) && Objects.equals(executionTime, that.executionTime) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && Objects.equals(documentLookups, that.documentLookups) && Objects.equals(intermediateCommits, that.intermediateCommits) && Objects.equals(seeks, that.seeks); + } + + @Override + public int hashCode() { + return Objects.hash(properties, writesExecuted, writesIgnored, scannedFull, scannedIndex, cursorsCreated, cursorsRearmed, cacheHits, cacheMisses, filtered, httpRequests, fullCount, executionTime, peakMemoryUsage, documentLookups, intermediateCommits, seeks); + } } diff --git a/core/src/main/java/com/arangodb/entity/CursorWarning.java b/core/src/main/java/com/arangodb/entity/CursorWarning.java index 72dc8ff1c..96d541efe 100644 --- a/core/src/main/java/com/arangodb/entity/CursorWarning.java +++ b/core/src/main/java/com/arangodb/entity/CursorWarning.java @@ -1,5 +1,7 @@ package com.arangodb.entity; +import java.util.Objects; + public final class CursorWarning { private Integer code; @@ -13,4 +15,15 @@ public String getMessage() { return message; } + @Override + public boolean equals(Object o) { + if (!(o instanceof CursorWarning)) return false; + CursorWarning that = (CursorWarning) 
o; + return Objects.equals(code, that.code) && Objects.equals(message, that.message); + } + + @Override + public int hashCode() { + return Objects.hash(code, message); + } } diff --git a/core/src/main/java/com/arangodb/entity/DatabaseEntity.java b/core/src/main/java/com/arangodb/entity/DatabaseEntity.java index 97005edef..73df87062 100644 --- a/core/src/main/java/com/arangodb/entity/DatabaseEntity.java +++ b/core/src/main/java/com/arangodb/entity/DatabaseEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -92,4 +94,16 @@ public Integer getWriteConcern() { public String getSharding() { return sharding; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof DatabaseEntity)) return false; + DatabaseEntity that = (DatabaseEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && Objects.equals(path, that.path) && Objects.equals(isSystem, that.isSystem) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(sharding, that.sharding); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, path, isSystem, replicationFactor, writeConcern, sharding); + } } diff --git a/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java b/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java index 0eeff2cf9..c5329e95c 100644 --- a/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java +++ b/core/src/main/java/com/arangodb/entity/DocumentCreateEntity.java @@ -22,6 +22,8 @@ import com.arangodb.internal.serde.UserData; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -59,4 +61,16 @@ public void setOld(final T oldDocument) { this.oldDocument = oldDocument; } + @Override + public boolean equals(Object o) { + if (!(o instanceof DocumentCreateEntity)) return false; + if (!super.equals(o)) return false; + DocumentCreateEntity that = 
(DocumentCreateEntity) o; + return Objects.equals(newDocument, that.newDocument) && Objects.equals(oldDocument, that.oldDocument); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), newDocument, oldDocument); + } } diff --git a/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java b/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java index fac6c36ae..41674fdbe 100644 --- a/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java +++ b/core/src/main/java/com/arangodb/entity/DocumentDeleteEntity.java @@ -22,6 +22,8 @@ import com.arangodb.internal.serde.UserData; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -45,4 +47,17 @@ public T getOld() { public void setOld(final T oldDocument) { this.oldDocument = oldDocument; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof DocumentDeleteEntity)) return false; + if (!super.equals(o)) return false; + DocumentDeleteEntity that = (DocumentDeleteEntity) o; + return Objects.equals(oldDocument, that.oldDocument); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldDocument); + } } diff --git a/core/src/main/java/com/arangodb/entity/DocumentEntity.java b/core/src/main/java/com/arangodb/entity/DocumentEntity.java index 56cc8545b..c0f82bd27 100644 --- a/core/src/main/java/com/arangodb/entity/DocumentEntity.java +++ b/core/src/main/java/com/arangodb/entity/DocumentEntity.java @@ -23,6 +23,8 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -56,4 +58,15 @@ public String getRev() { return rev; } + @Override + public boolean equals(Object o) { + if (!(o instanceof DocumentEntity)) return false; + DocumentEntity that = (DocumentEntity) o; + return Objects.equals(key, that.key) && Objects.equals(id, that.id) && Objects.equals(rev, that.rev); + } + + @Override + public int hashCode() 
{ + return Objects.hash(key, id, rev); + } } diff --git a/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java b/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java index b4f6a469f..eb7d48f18 100644 --- a/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java +++ b/core/src/main/java/com/arangodb/entity/DocumentImportEntity.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -108,4 +109,15 @@ public void setDetails(final Collection details) { this.details = details; } + @Override + public boolean equals(Object o) { + if (!(o instanceof DocumentImportEntity)) return false; + DocumentImportEntity that = (DocumentImportEntity) o; + return Objects.equals(created, that.created) && Objects.equals(errors, that.errors) && Objects.equals(empty, that.empty) && Objects.equals(updated, that.updated) && Objects.equals(ignored, that.ignored) && Objects.equals(details, that.details); + } + + @Override + public int hashCode() { + return Objects.hash(created, errors, empty, updated, ignored, details); + } } diff --git a/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java b/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java index d98ee7e56..2a0a3b6c2 100644 --- a/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java +++ b/core/src/main/java/com/arangodb/entity/DocumentUpdateEntity.java @@ -23,6 +23,8 @@ import com.arangodb.internal.serde.UserData; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -66,4 +68,16 @@ public void setOld(final T oldDocument) { this.oldDocument = oldDocument; } + @Override + public boolean equals(Object o) { + if (!(o instanceof DocumentUpdateEntity)) return false; + if (!super.equals(o)) return false; + DocumentUpdateEntity that = (DocumentUpdateEntity) o; + return Objects.equals(oldRev, that.oldRev) && Objects.equals(newDocument, 
that.newDocument) && Objects.equals(oldDocument, that.oldDocument); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldRev, newDocument, oldDocument); + } } diff --git a/core/src/main/java/com/arangodb/entity/EdgeDefinition.java b/core/src/main/java/com/arangodb/entity/EdgeDefinition.java index 980fc05bc..b89f67417 100644 --- a/core/src/main/java/com/arangodb/entity/EdgeDefinition.java +++ b/core/src/main/java/com/arangodb/entity/EdgeDefinition.java @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Objects; /** * @author Mark Vollmary @@ -80,11 +81,35 @@ public EdgeDefinition satellites(final String... satellites) { return this; } + @Override + public boolean equals(Object o) { + if (!(o instanceof EdgeDefinition)) return false; + EdgeDefinition that = (EdgeDefinition) o; + return Objects.equals(collection, that.collection) && Objects.equals(from, that.from) && Objects.equals(to, that.to) && Objects.equals(options, that.options); + } + + @Override + public int hashCode() { + return Objects.hash(collection, from, to, options); + } + public static final class Options { private Collection satellites = Collections.emptyList(); public Collection getSatellites() { return satellites; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Options)) return false; + Options options = (Options) o; + return Objects.equals(satellites, options.satellites); + } + + @Override + public int hashCode() { + return Objects.hashCode(satellites); + } } } diff --git a/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java b/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java index 6a5cb5a9a..15666c91d 100644 --- a/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java +++ b/core/src/main/java/com/arangodb/entity/EdgeUpdateEntity.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author 
Mark Vollmary */ @@ -38,4 +40,16 @@ public String getOldRev() { return oldRev; } + @Override + public boolean equals(Object o) { + if (!(o instanceof EdgeUpdateEntity)) return false; + if (!super.equals(o)) return false; + EdgeUpdateEntity that = (EdgeUpdateEntity) o; + return Objects.equals(oldRev, that.oldRev); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldRev); + } } diff --git a/core/src/main/java/com/arangodb/entity/ErrorEntity.java b/core/src/main/java/com/arangodb/entity/ErrorEntity.java index 5677d2301..534a70ecf 100644 --- a/core/src/main/java/com/arangodb/entity/ErrorEntity.java +++ b/core/src/main/java/com/arangodb/entity/ErrorEntity.java @@ -21,6 +21,7 @@ package com.arangodb.entity; import java.io.Serializable; +import java.util.Objects; /** * @author Mark Vollmary @@ -66,4 +67,15 @@ public int getErrorNum() { return errorNum; } + @Override + public boolean equals(Object o) { + if (!(o instanceof ErrorEntity)) return false; + ErrorEntity that = (ErrorEntity) o; + return code == that.code && errorNum == that.errorNum && Objects.equals(errorMessage, that.errorMessage) && Objects.equals(exception, that.exception); + } + + @Override + public int hashCode() { + return Objects.hash(errorMessage, exception, code, errorNum); + } } diff --git a/core/src/main/java/com/arangodb/entity/GraphEntity.java b/core/src/main/java/com/arangodb/entity/GraphEntity.java index dce443d60..9a068c566 100644 --- a/core/src/main/java/com/arangodb/entity/GraphEntity.java +++ b/core/src/main/java/com/arangodb/entity/GraphEntity.java @@ -21,6 +21,7 @@ package com.arangodb.entity; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -129,4 +130,16 @@ public String getSmartGraphAttribute() { public Boolean getIsSatellite() { return isSatellite; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof GraphEntity)) return false; + GraphEntity that = (GraphEntity) o; + return Objects.equals(name, 
that.name) && Objects.equals(edgeDefinitions, that.edgeDefinitions) && Objects.equals(orphanCollections, that.orphanCollections) && Objects.equals(numberOfShards, that.numberOfShards) && Objects.equals(_id, that._id) && Objects.equals(_rev, that._rev) && Objects.equals(replicationFactor, that.replicationFactor) && Objects.equals(writeConcern, that.writeConcern) && Objects.equals(isSmart, that.isSmart) && Objects.equals(isDisjoint, that.isDisjoint) && Objects.equals(smartGraphAttribute, that.smartGraphAttribute) && Objects.equals(isSatellite, that.isSatellite); + } + + @Override + public int hashCode() { + return Objects.hash(name, edgeDefinitions, orphanCollections, numberOfShards, _id, _rev, replicationFactor, writeConcern, isSmart, isDisjoint, smartGraphAttribute, isSatellite); + } } diff --git a/core/src/main/java/com/arangodb/entity/IndexEntity.java b/core/src/main/java/com/arangodb/entity/IndexEntity.java index b488845b7..cbf1140bb 100644 --- a/core/src/main/java/com/arangodb/entity/IndexEntity.java +++ b/core/src/main/java/com/arangodb/entity/IndexEntity.java @@ -23,6 +23,7 @@ import com.arangodb.model.MDIFieldValueTypes; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -133,4 +134,16 @@ public MDIFieldValueTypes getFieldValueTypes() { public Collection getPrefixFields() { return prefixFields; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof IndexEntity)) return false; + IndexEntity that = (IndexEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && type == that.type && Objects.equals(fields, that.fields) && Objects.equals(selectivityEstimate, that.selectivityEstimate) && Objects.equals(unique, that.unique) && Objects.equals(sparse, that.sparse) && Objects.equals(minLength, that.minLength) && Objects.equals(isNewlyCreated, that.isNewlyCreated) && Objects.equals(geoJson, that.geoJson) && Objects.equals(constraint, that.constraint) && Objects.equals(deduplicate, 
that.deduplicate) && Objects.equals(expireAfter, that.expireAfter) && Objects.equals(inBackground, that.inBackground) && Objects.equals(estimates, that.estimates) && Objects.equals(cacheEnabled, that.cacheEnabled) && Objects.equals(storedValues, that.storedValues) && Objects.equals(legacyPolygons, that.legacyPolygons) && fieldValueTypes == that.fieldValueTypes && Objects.equals(prefixFields, that.prefixFields); + } + + @Override + public int hashCode() { + return Objects.hash(id, name, type, fields, selectivityEstimate, unique, sparse, minLength, isNewlyCreated, geoJson, constraint, deduplicate, expireAfter, inBackground, estimates, cacheEnabled, storedValues, legacyPolygons, fieldValueTypes, prefixFields); + } } diff --git a/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java b/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java index 110f21ab9..102d96fbe 100644 --- a/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java +++ b/core/src/main/java/com/arangodb/entity/InvertedIndexEntity.java @@ -25,6 +25,7 @@ import com.arangodb.entity.arangosearch.StoredValue; import java.util.Collection; +import java.util.Objects; import java.util.Set; /** @@ -163,4 +164,16 @@ public Boolean getCache() { public Boolean getPrimaryKeyCache() { return primaryKeyCache; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof InvertedIndexEntity)) return false; + InvertedIndexEntity that = (InvertedIndexEntity) o; + return Objects.equals(id, that.id) && Objects.equals(isNewlyCreated, that.isNewlyCreated) && Objects.equals(unique, that.unique) && Objects.equals(sparse, that.sparse) && Objects.equals(version, that.version) && Objects.equals(code, that.code) && type == that.type && Objects.equals(name, that.name) && Objects.equals(fields, that.fields) && Objects.equals(searchField, that.searchField) && Objects.equals(storedValues, that.storedValues) && Objects.equals(optimizeTopK, that.optimizeTopK) && Objects.equals(primarySort, 
that.primarySort) && Objects.equals(analyzer, that.analyzer) && Objects.equals(features, that.features) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(trackListPositions, that.trackListPositions) && Objects.equals(cleanupIntervalStep, that.cleanupIntervalStep) && Objects.equals(commitIntervalMsec, that.commitIntervalMsec) && Objects.equals(consolidationIntervalMsec, that.consolidationIntervalMsec) && Objects.equals(consolidationPolicy, that.consolidationPolicy) && Objects.equals(writebufferIdle, that.writebufferIdle) && Objects.equals(writebufferActive, that.writebufferActive) && Objects.equals(writebufferSizeMax, that.writebufferSizeMax) && Objects.equals(cache, that.cache) && Objects.equals(primaryKeyCache, that.primaryKeyCache); + } + + @Override + public int hashCode() { + return Objects.hash(id, isNewlyCreated, unique, sparse, version, code, type, name, fields, searchField, storedValues, optimizeTopK, primarySort, analyzer, features, includeAllFields, trackListPositions, cleanupIntervalStep, commitIntervalMsec, consolidationIntervalMsec, consolidationPolicy, writebufferIdle, writebufferActive, writebufferSizeMax, cache, primaryKeyCache); + } } diff --git a/core/src/main/java/com/arangodb/entity/KeyOptions.java b/core/src/main/java/com/arangodb/entity/KeyOptions.java index 7206c9dbb..dbeb87d8c 100644 --- a/core/src/main/java/com/arangodb/entity/KeyOptions.java +++ b/core/src/main/java/com/arangodb/entity/KeyOptions.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -74,4 +76,15 @@ public void setOffset(final Integer offset) { this.offset = offset; } + @Override + public boolean equals(Object o) { + if (!(o instanceof KeyOptions)) return false; + KeyOptions that = (KeyOptions) o; + return Objects.equals(allowUserKeys, that.allowUserKeys) && type == that.type && Objects.equals(increment, that.increment) && Objects.equals(offset, that.offset); + } + + @Override + public 
int hashCode() { + return Objects.hash(allowUserKeys, type, increment, offset); + } } diff --git a/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java b/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java index a78a3f93d..9622525b1 100644 --- a/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java +++ b/core/src/main/java/com/arangodb/entity/LogEntriesEntity.java @@ -21,6 +21,7 @@ package com.arangodb.entity; import java.util.List; +import java.util.Objects; /** * @author Michele Rastelli @@ -39,6 +40,18 @@ public List getMessages() { return messages; } + @Override + public boolean equals(Object o) { + if (!(o instanceof LogEntriesEntity)) return false; + LogEntriesEntity that = (LogEntriesEntity) o; + return Objects.equals(total, that.total) && Objects.equals(messages, that.messages); + } + + @Override + public int hashCode() { + return Objects.hash(total, messages); + } + public static final class Message { Long id; String topic; @@ -65,6 +78,18 @@ public String getDate() { public String getMessage() { return message; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Message)) return false; + Message message1 = (Message) o; + return Objects.equals(id, message1.id) && Objects.equals(topic, message1.topic) && Objects.equals(level, message1.level) && Objects.equals(date, message1.date) && Objects.equals(message, message1.message); + } + + @Override + public int hashCode() { + return Objects.hash(id, topic, level, date, message); + } } } diff --git a/core/src/main/java/com/arangodb/entity/LogLevelEntity.java b/core/src/main/java/com/arangodb/entity/LogLevelEntity.java index 39daaf59b..a12372749 100644 --- a/core/src/main/java/com/arangodb/entity/LogLevelEntity.java +++ b/core/src/main/java/com/arangodb/entity/LogLevelEntity.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -466,6 +468,18 @@ public void setGeneral(LogLevel general) 
{ this.general = general; } + @Override + public boolean equals(Object o) { + if (!(o instanceof LogLevelEntity)) return false; + LogLevelEntity that = (LogLevelEntity) o; + return all == that.all && agency == that.agency && agencycomm == that.agencycomm && agencystore == that.agencystore && backup == that.backup && bench == that.bench && cluster == that.cluster && communication == that.communication && authentication == that.authentication && config == that.config && crash == that.crash && dump == that.dump && engines == that.engines && cache == that.cache && security == that.security && startup == that.startup && trx == that.trx && supervision == that.supervision && threads == that.threads && ttl == that.ttl && ssl == that.ssl && replication2 == that.replication2 && restore == that.restore && memory == that.memory && validation == that.validation && statistics == that.statistics && v8 == that.v8 && syscall == that.syscall && libiresearch == that.libiresearch && license == that.license && deprecation == that.deprecation && rocksdb == that.rocksdb && requests == that.requests && repWal == that.repWal && arangosearch == that.arangosearch && views == that.views && repState == that.repState && authorization == that.authorization && queries == that.queries && aql == that.aql && graphs == that.graphs && maintenance == that.maintenance && development == that.development && replication == that.replication && httpclient == that.httpclient && heartbeat == that.heartbeat && flush == that.flush && general == that.general; + } + + @Override + public int hashCode() { + return Objects.hash(all, agency, agencycomm, agencystore, backup, bench, cluster, communication, authentication, config, crash, dump, engines, cache, security, startup, trx, supervision, threads, ttl, ssl, replication2, restore, memory, validation, statistics, v8, syscall, libiresearch, license, deprecation, rocksdb, requests, repWal, arangosearch, views, repState, authorization, queries, aql, graphs, 
maintenance, development, replication, httpclient, heartbeat, flush, general); + } + public enum LogLevel { FATAL, ERROR, WARNING, INFO, DEBUG, TRACE, DEFAULT } diff --git a/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java b/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java index 95803226f..14c899702 100644 --- a/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java +++ b/core/src/main/java/com/arangodb/entity/MultiDocumentEntity.java @@ -20,16 +20,18 @@ package com.arangodb.entity; +import java.util.ArrayList; import java.util.List; +import java.util.Objects; /** * @author Mark Vollmary */ public final class MultiDocumentEntity { - private List documents; - private List errors; - private List documentsAndErrors; + private List documents = new ArrayList<>(); + private List errors = new ArrayList<>(); + private List documentsAndErrors = new ArrayList<>(); private boolean isPotentialDirtyRead = false; public MultiDocumentEntity() { @@ -80,4 +82,16 @@ public Boolean isPotentialDirtyRead() { public void setPotentialDirtyRead(final Boolean isPotentialDirtyRead) { this.isPotentialDirtyRead = isPotentialDirtyRead; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof MultiDocumentEntity)) return false; + MultiDocumentEntity that = (MultiDocumentEntity) o; + return isPotentialDirtyRead == that.isPotentialDirtyRead && Objects.equals(documents, that.documents) && Objects.equals(errors, that.errors) && Objects.equals(documentsAndErrors, that.documentsAndErrors); + } + + @Override + public int hashCode() { + return Objects.hash(documents, errors, documentsAndErrors, isPotentialDirtyRead); + } } diff --git a/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java b/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java index 83efee47b..83758e581 100644 --- a/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java +++ 
b/core/src/main/java/com/arangodb/entity/QueryCachePropertiesEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -60,6 +62,18 @@ public void setMaxResults(final Long maxResults) { this.maxResults = maxResults; } + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryCachePropertiesEntity)) return false; + QueryCachePropertiesEntity that = (QueryCachePropertiesEntity) o; + return mode == that.mode && Objects.equals(maxResults, that.maxResults); + } + + @Override + public int hashCode() { + return Objects.hash(mode, maxResults); + } + public enum CacheMode { off, on, demand } diff --git a/core/src/main/java/com/arangodb/entity/QueryEntity.java b/core/src/main/java/com/arangodb/entity/QueryEntity.java index 0965bf33a..9518f5fe4 100644 --- a/core/src/main/java/com/arangodb/entity/QueryEntity.java +++ b/core/src/main/java/com/arangodb/entity/QueryEntity.java @@ -22,6 +22,7 @@ import java.util.Date; import java.util.Map; +import java.util.Objects; /** * @author Mark Vollmary @@ -108,4 +109,16 @@ public QueryExecutionState getState() { public Boolean getStream() { return stream; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryEntity)) return false; + QueryEntity that = (QueryEntity) o; + return Objects.equals(id, that.id) && Objects.equals(database, that.database) && Objects.equals(user, that.user) && Objects.equals(query, that.query) && Objects.equals(bindVars, that.bindVars) && Objects.equals(started, that.started) && Objects.equals(runTime, that.runTime) && Objects.equals(peakMemoryUsage, that.peakMemoryUsage) && state == that.state && Objects.equals(stream, that.stream); + } + + @Override + public int hashCode() { + return Objects.hash(id, database, user, query, bindVars, started, runTime, peakMemoryUsage, state, stream); + } } diff --git a/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java 
b/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java index e6b543a6d..ef7068af6 100644 --- a/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java +++ b/core/src/main/java/com/arangodb/entity/QueryOptimizerRule.java @@ -1,5 +1,7 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @since ArangoDB 3.10 */ @@ -15,6 +17,18 @@ public Flags getFlags() { return flags; } + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryOptimizerRule)) return false; + QueryOptimizerRule that = (QueryOptimizerRule) o; + return Objects.equals(name, that.name) && Objects.equals(flags, that.flags); + } + + @Override + public int hashCode() { + return Objects.hash(name, flags); + } + public static class Flags { private Boolean hidden; private Boolean clusterOnly; @@ -46,5 +60,17 @@ public Boolean getDisabledByDefault() { public Boolean getEnterpriseOnly() { return enterpriseOnly; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Flags)) return false; + Flags flags = (Flags) o; + return Objects.equals(hidden, flags.hidden) && Objects.equals(clusterOnly, flags.clusterOnly) && Objects.equals(canBeDisabled, flags.canBeDisabled) && Objects.equals(canCreateAdditionalPlans, flags.canCreateAdditionalPlans) && Objects.equals(disabledByDefault, flags.disabledByDefault) && Objects.equals(enterpriseOnly, flags.enterpriseOnly); + } + + @Override + public int hashCode() { + return Objects.hash(hidden, clusterOnly, canBeDisabled, canCreateAdditionalPlans, disabledByDefault, enterpriseOnly); + } } } diff --git a/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java index 80fb242de..52378b408 100644 --- a/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java +++ b/core/src/main/java/com/arangodb/entity/QueryTrackingPropertiesEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** 
* @author Mark Vollmary */ @@ -129,4 +131,15 @@ public void setMaxQueryStringLength(final Long maxQueryStringLength) { this.maxQueryStringLength = maxQueryStringLength; } + @Override + public boolean equals(Object o) { + if (!(o instanceof QueryTrackingPropertiesEntity)) return false; + QueryTrackingPropertiesEntity that = (QueryTrackingPropertiesEntity) o; + return Objects.equals(enabled, that.enabled) && Objects.equals(trackSlowQueries, that.trackSlowQueries) && Objects.equals(maxSlowQueries, that.maxSlowQueries) && Objects.equals(slowQueryThreshold, that.slowQueryThreshold) && Objects.equals(maxQueryStringLength, that.maxQueryStringLength); + } + + @Override + public int hashCode() { + return Objects.hash(enabled, trackSlowQueries, maxSlowQueries, slowQueryThreshold, maxQueryStringLength); + } } diff --git a/core/src/main/java/com/arangodb/entity/ShardEntity.java b/core/src/main/java/com/arangodb/entity/ShardEntity.java index 8b853a623..8ba767816 100644 --- a/core/src/main/java/com/arangodb/entity/ShardEntity.java +++ b/core/src/main/java/com/arangodb/entity/ShardEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Michele Rastelli */ @@ -35,4 +37,15 @@ public String getShardId() { return shardId; } + @Override + public boolean equals(Object o) { + if (!(o instanceof ShardEntity)) return false; + ShardEntity that = (ShardEntity) o; + return Objects.equals(shardId, that.shardId); + } + + @Override + public int hashCode() { + return Objects.hashCode(shardId); + } } diff --git a/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java b/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java index 82dd79c7b..674babd71 100644 --- a/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java +++ b/core/src/main/java/com/arangodb/entity/StreamTransactionEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Michele Rastelli * @since ArangoDB 
3.5.0 @@ -37,4 +39,15 @@ public StreamTransactionStatus getStatus() { return status; } + @Override + public boolean equals(Object o) { + if (!(o instanceof StreamTransactionEntity)) return false; + StreamTransactionEntity that = (StreamTransactionEntity) o; + return Objects.equals(id, that.id) && status == that.status; + } + + @Override + public int hashCode() { + return Objects.hash(id, status); + } } diff --git a/core/src/main/java/com/arangodb/entity/TransactionEntity.java b/core/src/main/java/com/arangodb/entity/TransactionEntity.java index 2161c5766..043c22819 100644 --- a/core/src/main/java/com/arangodb/entity/TransactionEntity.java +++ b/core/src/main/java/com/arangodb/entity/TransactionEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Michele Rastelli * @since ArangoDB 3.5.0 @@ -37,4 +39,15 @@ public StreamTransactionStatus getState() { return state; } + @Override + public boolean equals(Object o) { + if (!(o instanceof TransactionEntity)) return false; + TransactionEntity that = (TransactionEntity) o; + return Objects.equals(id, that.id) && state == that.state; + } + + @Override + public int hashCode() { + return Objects.hash(id, state); + } } diff --git a/core/src/main/java/com/arangodb/entity/UserEntity.java b/core/src/main/java/com/arangodb/entity/UserEntity.java index 76181ffe4..64b213439 100644 --- a/core/src/main/java/com/arangodb/entity/UserEntity.java +++ b/core/src/main/java/com/arangodb/entity/UserEntity.java @@ -21,6 +21,7 @@ package com.arangodb.entity; import java.util.Map; +import java.util.Objects; /** * @author Mark Vollmary @@ -57,4 +58,15 @@ public Boolean getChangePassword() { return changePassword; } + @Override + public boolean equals(Object o) { + if (!(o instanceof UserEntity)) return false; + UserEntity that = (UserEntity) o; + return Objects.equals(user, that.user) && Objects.equals(active, that.active) && Objects.equals(extra, that.extra) && Objects.equals(changePassword, 
that.changePassword); + } + + @Override + public int hashCode() { + return Objects.hash(user, active, extra, changePassword); + } } diff --git a/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java b/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java index ef19a4386..22c2f0853 100644 --- a/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java +++ b/core/src/main/java/com/arangodb/entity/VertexUpdateEntity.java @@ -22,6 +22,8 @@ import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -38,4 +40,16 @@ public String getOldRev() { return oldRev; } + @Override + public boolean equals(Object o) { + if (!(o instanceof VertexUpdateEntity)) return false; + if (!super.equals(o)) return false; + VertexUpdateEntity that = (VertexUpdateEntity) o; + return Objects.equals(oldRev, that.oldRev); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oldRev); + } } diff --git a/core/src/main/java/com/arangodb/entity/ViewEntity.java b/core/src/main/java/com/arangodb/entity/ViewEntity.java index e5eb28299..94c7e0a6d 100644 --- a/core/src/main/java/com/arangodb/entity/ViewEntity.java +++ b/core/src/main/java/com/arangodb/entity/ViewEntity.java @@ -20,6 +20,8 @@ package com.arangodb.entity; +import java.util.Objects; + /** * @author Mark Vollmary */ @@ -52,4 +54,15 @@ public ViewType getType() { return type; } + @Override + public boolean equals(Object o) { + if (!(o instanceof ViewEntity)) return false; + ViewEntity that = (ViewEntity) o; + return Objects.equals(id, that.id) && Objects.equals(name, that.name) && type == that.type; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, type); + } } diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java index 5255a3fff..fa729e1e0 100644 --- 
a/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/ArangoSearchPropertiesEntity.java @@ -25,6 +25,7 @@ import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -136,4 +137,17 @@ public Boolean getPrimarySortCache() { public Boolean getPrimaryKeyCache() { return primaryKeyCache; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ArangoSearchPropertiesEntity)) return false; + if (!super.equals(o)) return false; + ArangoSearchPropertiesEntity that = (ArangoSearchPropertiesEntity) o; + return Objects.equals(consolidationIntervalMsec, that.consolidationIntervalMsec) && Objects.equals(commitIntervalMsec, that.commitIntervalMsec) && Objects.equals(cleanupIntervalStep, that.cleanupIntervalStep) && Objects.equals(consolidationPolicy, that.consolidationPolicy) && Objects.equals(primarySort, that.primarySort) && Objects.equals(links, that.links) && primarySortCompression == that.primarySortCompression && Objects.equals(storedValues, that.storedValues) && Objects.equals(optimizeTopK, that.optimizeTopK) && Objects.equals(primarySortCache, that.primarySortCache) && Objects.equals(primaryKeyCache, that.primaryKeyCache); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), consolidationIntervalMsec, commitIntervalMsec, cleanupIntervalStep, consolidationPolicy, primarySort, links, primarySortCompression, storedValues, optimizeTopK, primarySortCache, primaryKeyCache); + } } diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java b/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java index c5901abcc..eb33ebb20 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/CollectionLink.java @@ -30,6 +30,7 @@ import 
java.util.Arrays; import java.util.Collection; +import java.util.Objects; /** * @author Mark Vollmary @@ -188,4 +189,15 @@ public Boolean getCache() { return cache; } + @Override + public boolean equals(Object o) { + if (!(o instanceof CollectionLink)) return false; + CollectionLink that = (CollectionLink) o; + return Objects.equals(name, that.name) && Objects.equals(analyzers, that.analyzers) && Objects.equals(includeAllFields, that.includeAllFields) && Objects.equals(trackListPositions, that.trackListPositions) && storeValues == that.storeValues && Objects.equals(fields, that.fields) && Objects.equals(nested, that.nested) && Objects.equals(inBackground, that.inBackground) && Objects.equals(cache, that.cache); + } + + @Override + public int hashCode() { + return Objects.hash(name, analyzers, includeAllFields, trackListPositions, storeValues, fields, nested, inBackground, cache); + } } \ No newline at end of file diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java b/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java index 730996084..1d2f0f8f8 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/FieldLink.java @@ -10,6 +10,7 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Objects; public final class FieldLink { @@ -164,4 +165,16 @@ public Boolean getInBackground() { public Boolean getCache() { return cache; } + + @Override + public boolean equals(Object o) { + if (!(o instanceof FieldLink)) return false; + FieldLink fieldLink = (FieldLink) o; + return Objects.equals(name, fieldLink.name) && Objects.equals(analyzers, fieldLink.analyzers) && Objects.equals(includeAllFields, fieldLink.includeAllFields) && Objects.equals(trackListPositions, fieldLink.trackListPositions) && storeValues == fieldLink.storeValues && Objects.equals(fields, fieldLink.fields) && Objects.equals(nested, fieldLink.nested) && 
Objects.equals(inBackground, fieldLink.inBackground) && Objects.equals(cache, fieldLink.cache); + } + + @Override + public int hashCode() { + return Objects.hash(name, analyzers, includeAllFields, trackListPositions, storeValues, fields, nested, inBackground, cache); + } } \ No newline at end of file diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java b/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java index ab0d7c872..10b054108 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/PrimarySort.java @@ -20,6 +20,11 @@ package com.arangodb.entity.arangosearch; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + /** * @author Heiko Kernbach */ @@ -28,6 +33,14 @@ public final class PrimarySort { private final String fieldName; private Boolean ascending; + public PrimarySort( + @JsonProperty("field") String field, + @JsonProperty("asc") Boolean asc + ) { + this.fieldName = field; + this.ascending = asc; + } + private PrimarySort(final String fieldName) { super(); this.fieldName = fieldName; @@ -46,11 +59,45 @@ public PrimarySort ascending(final Boolean ascending) { return this; } + @JsonIgnore public Boolean getAscending() { return ascending; } + public Direction getDirection() { + if (ascending == null) { + return null; + } + return ascending ? 
Direction.asc : Direction.desc; + } + + /** + * @deprecated for removal, use {@link #getField()} instead + */ + @Deprecated + @JsonIgnore public String getFieldName() { + return getField(); + } + + public String getField() { return fieldName; } + + public enum Direction { + asc, + desc + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof PrimarySort)) return false; + PrimarySort that = (PrimarySort) o; + return Objects.equals(fieldName, that.fieldName) && Objects.equals(ascending, that.ascending); + } + + @Override + public int hashCode() { + return Objects.hash(fieldName, ascending); + } } diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java index 1d18076f0..7d92d2768 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasIndex.java @@ -3,6 +3,8 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * @author Michele Rastelli * @since ArabgoDB 3.10 @@ -50,4 +52,16 @@ public OperationType getOperation() { public enum OperationType { add, del } + + @Override + public boolean equals(Object o) { + if (!(o instanceof SearchAliasIndex)) return false; + SearchAliasIndex that = (SearchAliasIndex) o; + return Objects.equals(collection, that.collection) && Objects.equals(index, that.index) && operation == that.operation; + } + + @Override + public int hashCode() { + return Objects.hash(collection, index, operation); + } } diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java index 6a0664214..208c17664 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java +++ 
b/core/src/main/java/com/arangodb/entity/arangosearch/SearchAliasPropertiesEntity.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Objects; /** * @author Michele Rastelli @@ -39,4 +40,16 @@ public Collection getIndexes() { return indexes; } + @Override + public boolean equals(Object o) { + if (!(o instanceof SearchAliasPropertiesEntity)) return false; + if (!super.equals(o)) return false; + SearchAliasPropertiesEntity that = (SearchAliasPropertiesEntity) o; + return Objects.equals(indexes, that.indexes); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), indexes); + } } diff --git a/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java b/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java index 4716f5864..d300b7f99 100644 --- a/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java +++ b/core/src/main/java/com/arangodb/entity/arangosearch/StoredValue.java @@ -73,8 +73,7 @@ public Boolean getCache() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (!(o instanceof StoredValue)) return false; StoredValue that = (StoredValue) o; return Objects.equals(fields, that.fields) && compression == that.compression && Objects.equals(cache, that.cache); } diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java index cf5ceeecc..7c7dda594 100644 --- a/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java +++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseAsyncImpl.java @@ -169,7 +169,7 @@ public CompletableFuture getPermissions(final String user) { @Override public CompletableFuture> query( - final String query, final Class type, final Map bindVars, final AqlQueryOptions options) { + final String query, final Class type, final Map bindVars, 
final AqlQueryOptions options) { final InternalRequest request = queryRequest(query, bindVars, options); final HostHandle hostHandle = new HostHandle(); return executorAsync().execute(() -> request, cursorEntityDeserializer(type), hostHandle) @@ -182,7 +182,7 @@ public CompletableFuture> query(String query, Class } @Override - public CompletableFuture> query(String query, Class type, Map bindVars) { + public CompletableFuture> query(String query, Class type, Map bindVars) { return query(query, type, bindVars, new AqlQueryOptions()); } @@ -219,10 +219,21 @@ public CompletableFuture> cursor(String cursorId, Class @Override public CompletableFuture explainQuery( - final String query, final Map bindVars, final AqlQueryExplainOptions options) { + final String query, final Map bindVars, final AqlQueryExplainOptions options) { return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); } + @Override + public CompletableFuture explainAqlQuery( + String query, Map bindVars, AqlQueryExplainOptions options) { + return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public CompletableFuture explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) { + return executorAsync().execute(() -> explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + @Override public CompletableFuture parseQuery(final String query) { return executorAsync().execute(() -> parseQueryRequest(query), AqlParseEntity.class); diff --git a/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java index 63fec8e66..26649883e 100644 --- a/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java +++ b/core/src/main/java/com/arangodb/internal/ArangoDatabaseImpl.java @@ -162,7 +162,7 @@ public Permissions getPermissions(final String user) { @Override public 
ArangoCursor query( - final String query, final Class type, final Map bindVars, final AqlQueryOptions options) { + final String query, final Class type, final Map bindVars, final AqlQueryOptions options) { final InternalRequest request = queryRequest(query, bindVars, options); final HostHandle hostHandle = new HostHandle(); final CursorEntity result = executorSync().execute(request, cursorEntityDeserializer(type), hostHandle); @@ -170,7 +170,7 @@ public ArangoCursor query( } @Override - public ArangoCursor query(final String query, final Class type, final Map bindVars) { + public ArangoCursor query(final String query, final Class type, final Map bindVars) { return query(query, type, bindVars, new AqlQueryOptions()); } @@ -240,10 +240,20 @@ public void close(final String id) { @Override public AqlExecutionExplainEntity explainQuery( - final String query, final Map bindVars, final AqlQueryExplainOptions options) { + final String query, final Map bindVars, final AqlQueryExplainOptions options) { return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlExecutionExplainEntity.class); } + @Override + public AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, AqlQueryExplainOptions options) { + return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + + @Override + public AqlQueryExplainEntity explainAqlQuery(String query, Map bindVars, ExplainAqlQueryOptions options) { + return executorSync().execute(explainQueryRequest(query, bindVars, options), AqlQueryExplainEntity.class); + } + @Override public AqlParseEntity parseQuery(final String query) { return executorSync().execute(parseQueryRequest(query), AqlParseEntity.class); diff --git a/core/src/main/java/com/arangodb/internal/ArangoDefaults.java b/core/src/main/java/com/arangodb/internal/ArangoDefaults.java index b08c045da..b54d3d8c0 100644 --- a/core/src/main/java/com/arangodb/internal/ArangoDefaults.java +++ 
b/core/src/main/java/com/arangodb/internal/ArangoDefaults.java @@ -30,9 +30,12 @@ /** * @author Mark Vollmary + * @author Michele Rastelli */ public final class ArangoDefaults { + private static final int MB = 1024 * 1024; + public static final int INTEGER_BYTES = Integer.SIZE / Byte.SIZE; public static final int LONG_BYTES = Long.SIZE / Byte.SIZE; public static final int CHUNK_MIN_HEADER_SIZE = INTEGER_BYTES + INTEGER_BYTES + LONG_BYTES; @@ -48,8 +51,12 @@ public final class ArangoDefaults { public static final Integer DEFAULT_TIMEOUT = 0; public static final Long DEFAULT_CONNECTION_TTL_HTTP = 30_000L; public static final Boolean DEFAULT_USE_SSL = false; + public static final String DEFAULT_SSL_PROTOCOL = "TLS"; public static final Boolean DEFAULT_VERIFY_HOST = true; public static final Integer DEFAULT_CHUNK_SIZE = 30_000; + public static final Boolean DEFAULT_PIPELINING = false; + public static final Integer DEFAULT_CONNECTION_WINDOW_SIZE = 32 * MB; + public static final Integer DEFAULT_INITIAL_WINDOW_SIZE = 2 * MB; public static final Boolean DEFAULT_ACQUIRE_HOST_LIST = false; public static final Integer DEFAULT_ACQUIRE_HOST_LIST_INTERVAL = 60 * 60 * 1000; // hour public static final LoadBalancingStrategy DEFAULT_LOAD_BALANCING_STRATEGY = LoadBalancingStrategy.NONE; diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java b/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java index 6f054cf5e..f794bcd31 100644 --- a/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java +++ b/core/src/main/java/com/arangodb/internal/InternalArangoCollection.java @@ -32,7 +32,6 @@ import java.lang.reflect.Type; import java.util.ArrayList; import java.util.Collection; -import java.util.List; import static com.arangodb.internal.serde.SerdeUtils.constructParametricType; @@ -111,28 +110,9 @@ private InternalRequest createInsertDocumentRequest(final DocumentCreateOptions protected ResponseDeserializer>> 
insertDocumentsResponseDeserializer(Class userDataClass) { return (response) -> { - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity<>(); - final List> docs = new ArrayList<>(); - final List errors = new ArrayList<>(); - final List documentsAndErrors = new ArrayList<>(); - final JsonNode body = getSerde().parse(response.getBody()); - for (final JsonNode next : body) { - JsonNode isError = next.get(ArangoResponseField.ERROR_FIELD_NAME); - if (isError != null && isError.booleanValue()) { - final ErrorEntity error = getSerde().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - Type type = constructParametricType(DocumentCreateEntity.class, userDataClass); - final DocumentCreateEntity doc = getSerde().deserialize(next, type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentCreateEntity.class, userDataClass)); + return getSerde().deserialize(response.getBody(), type); }; } @@ -184,31 +164,12 @@ protected InternalRequest getDocumentsRequest(final Iterable keys, final return request; } - protected ResponseDeserializer> getDocumentsResponseDeserializer( - final Class type) { + protected ResponseDeserializer> getDocumentsResponseDeserializer(final Class type) { return (response) -> { - final MultiDocumentEntity multiDocument = new MultiDocumentEntity<>(); + MultiDocumentEntity multiDocument = getSerde().deserialize(response.getBody(), + constructParametricType(MultiDocumentEntity.class, type)); boolean potentialDirtyRead = Boolean.parseBoolean(response.getMeta("X-Arango-Potential-Dirty-Read")); multiDocument.setPotentialDirtyRead(potentialDirtyRead); - final List docs = new ArrayList<>(); - final List errors = new ArrayList<>(); - final List 
documentsAndErrors = new ArrayList<>(); - final JsonNode body = getSerde().parse(response.getBody()); - for (final JsonNode next : body) { - JsonNode isError = next.get(ArangoResponseField.ERROR_FIELD_NAME); - if (isError != null && isError.booleanValue()) { - final ErrorEntity error = getSerde().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - final T doc = getSerde().deserializeUserData(getSerde().serialize(next), type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); return multiDocument; }; } @@ -250,28 +211,9 @@ private InternalRequest createReplaceDocumentRequest(final DocumentReplaceOption protected ResponseDeserializer>> replaceDocumentsResponseDeserializer( final Class returnType) { return (response) -> { - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity<>(); - final List> docs = new ArrayList<>(); - final List errors = new ArrayList<>(); - final List documentsAndErrors = new ArrayList<>(); - final JsonNode body = getSerde().parse(response.getBody()); - for (final JsonNode next : body) { - JsonNode isError = next.get(ArangoResponseField.ERROR_FIELD_NAME); - if (isError != null && isError.booleanValue()) { - final ErrorEntity error = getSerde().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - Type type = constructParametricType(DocumentUpdateEntity.class, returnType); - final DocumentUpdateEntity doc = getSerde().deserialize(next, type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentUpdateEntity.class, returnType)); + return 
getSerde().deserialize(response.getBody(), type); }; } @@ -313,28 +255,9 @@ private InternalRequest createUpdateDocumentRequest(final DocumentUpdateOptions protected ResponseDeserializer>> updateDocumentsResponseDeserializer( final Class returnType) { return (response) -> { - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity<>(); - final List> docs = new ArrayList<>(); - final List errors = new ArrayList<>(); - final List documentsAndErrors = new ArrayList<>(); - final JsonNode body = getSerde().parse(response.getBody()); - for (final JsonNode next : body) { - JsonNode isError = next.get(ArangoResponseField.ERROR_FIELD_NAME); - if (isError != null && isError.booleanValue()) { - final ErrorEntity error = getSerde().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - Type type = constructParametricType(DocumentUpdateEntity.class, returnType); - final DocumentUpdateEntity doc = getSerde().deserialize(next, type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentUpdateEntity.class, returnType)); + return getSerde().deserialize(response.getBody(), type); }; } @@ -370,28 +293,9 @@ private InternalRequest createDeleteDocumentRequest(final DocumentDeleteOptions protected ResponseDeserializer>> deleteDocumentsResponseDeserializer( final Class userDataClass) { return (response) -> { - final MultiDocumentEntity> multiDocument = new MultiDocumentEntity<>(); - final List> docs = new ArrayList<>(); - final List errors = new ArrayList<>(); - final List documentsAndErrors = new ArrayList<>(); - final JsonNode body = getSerde().parse(response.getBody()); - for (final JsonNode next : body) { - JsonNode isError = next.get(ArangoResponseField.ERROR_FIELD_NAME); 
- if (isError != null && isError.booleanValue()) { - final ErrorEntity error = getSerde().deserialize(next, ErrorEntity.class); - errors.add(error); - documentsAndErrors.add(error); - } else { - Type type = constructParametricType(DocumentDeleteEntity.class, userDataClass); - final DocumentDeleteEntity doc = getSerde().deserialize(next, type); - docs.add(doc); - documentsAndErrors.add(doc); - } - } - multiDocument.setDocuments(docs); - multiDocument.setErrors(errors); - multiDocument.setDocumentsAndErrors(documentsAndErrors); - return multiDocument; + Type type = constructParametricType(MultiDocumentEntity.class, + constructParametricType(DocumentDeleteEntity.class, userDataClass)); + return getSerde().deserialize(response.getBody(), type); }; } diff --git a/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java index 32a126f11..135f4d825 100644 --- a/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java +++ b/core/src/main/java/com/arangodb/internal/InternalArangoDatabase.java @@ -140,7 +140,7 @@ protected ResponseDeserializer getPermissionsResponseDeserialzer() Permissions.class); } - protected InternalRequest queryRequest(final String query, final Map bindVars, + protected InternalRequest queryRequest(final String query, final Map bindVars, final AqlQueryOptions options) { final AqlQueryOptions opt = options != null ? options : new AqlQueryOptions(); final InternalRequest request = request(name, RequestType.POST, PATH_API_CURSOR) @@ -172,13 +172,20 @@ protected InternalRequest queryCloseRequest(final String id, final AqlQueryOptio return request; } - protected InternalRequest explainQueryRequest(final String query, final Map bindVars, + protected InternalRequest explainQueryRequest(final String query, final Map bindVars, final AqlQueryExplainOptions options) { final AqlQueryExplainOptions opt = options != null ? 
options : new AqlQueryExplainOptions(); return request(name, RequestType.POST, PATH_API_EXPLAIN) .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars))); } + protected InternalRequest explainQueryRequest(final String query, final Map bindVars, + final ExplainAqlQueryOptions options) { + final ExplainAqlQueryOptions opt = options != null ? options : new ExplainAqlQueryOptions(); + return request(name, RequestType.POST, PATH_API_EXPLAIN) + .setBody(getSerde().serialize(OptionsBuilder.build(opt, query, bindVars))); + } + protected InternalRequest parseQueryRequest(final String query) { return request(name, RequestType.POST, PATH_API_QUERY).setBody(getSerde().serialize(OptionsBuilder.build(new AqlQueryParseOptions(), query))); } diff --git a/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java b/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java index f38903532..31446515e 100644 --- a/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java +++ b/core/src/main/java/com/arangodb/internal/config/ArangoConfig.java @@ -16,7 +16,12 @@ import com.fasterxml.jackson.databind.Module; import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import java.io.ByteArrayInputStream; import java.lang.reflect.InvocationTargetException; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; import java.util.*; import java.util.concurrent.Executor; import java.util.stream.Collectors; @@ -30,9 +35,15 @@ public class ArangoConfig { private String password; private String jwt; private Boolean useSsl; + private Optional sslCertValue; + private Optional sslAlgorithm; + private String sslProtocol; private SSLContext sslContext; private Boolean verifyHost; private Integer chunkSize; + private Boolean pipelining; + private Integer connectionWindowSize; + private Integer initialWindowSize; private Integer maxConnections; private Long connectionTtl; private 
Integer keepAliveInterval; @@ -68,8 +79,14 @@ public void loadProperties(final ArangoConfigProperties properties) { // FIXME: make jwt field Optional jwt = properties.getJwt().orElse(null); useSsl = properties.getUseSsl().orElse(ArangoDefaults.DEFAULT_USE_SSL); + sslCertValue = properties.getSslCertValue(); + sslAlgorithm = properties.getSslAlgorithm(); + sslProtocol = properties.getSslProtocol().orElse(ArangoDefaults.DEFAULT_SSL_PROTOCOL); verifyHost = properties.getVerifyHost().orElse(ArangoDefaults.DEFAULT_VERIFY_HOST); chunkSize = properties.getChunkSize().orElse(ArangoDefaults.DEFAULT_CHUNK_SIZE); + pipelining = properties.getPipelining().orElse(ArangoDefaults.DEFAULT_PIPELINING); + connectionWindowSize = properties.getConnectionWindowSize().orElse(ArangoDefaults.DEFAULT_CONNECTION_WINDOW_SIZE); + initialWindowSize = properties.getInitialWindowSize().orElse(ArangoDefaults.DEFAULT_INITIAL_WINDOW_SIZE); // FIXME: make maxConnections field Optional maxConnections = properties.getMaxConnections().orElse(null); // FIXME: make connectionTtl field Optional @@ -149,7 +166,22 @@ public void setUseSsl(Boolean useSsl) { this.useSsl = useSsl; } + public void setSslCertValue(String sslCertValue) { + this.sslCertValue = Optional.ofNullable(sslCertValue); + } + + public void setSslAlgorithm(String sslAlgorithm) { + this.sslAlgorithm = Optional.ofNullable(sslAlgorithm); + } + + public void setSslProtocol(String sslProtocol) { + this.sslProtocol = sslProtocol; + } + public SSLContext getSslContext() { + if (sslContext == null) { + sslContext = createSslContext(); + } return sslContext; } @@ -173,6 +205,30 @@ public void setChunkSize(Integer chunkSize) { this.chunkSize = chunkSize; } + public Boolean getPipelining() { + return pipelining; + } + + public void setPipelining(Boolean pipelining) { + this.pipelining = pipelining; + } + + public Integer getConnectionWindowSize() { + return connectionWindowSize; + } + + public void setConnectionWindowSize(Integer connectionWindowSize) 
{ + this.connectionWindowSize = connectionWindowSize; + } + + public Integer getInitialWindowSize() { + return initialWindowSize; + } + + public void setInitialWindowSize(Integer initialWindowSize) { + this.initialWindowSize = initialWindowSize; + } + public Integer getMaxConnections() { if (maxConnections == null) { maxConnections = getDefaultMaxConnections(); @@ -332,4 +388,26 @@ public ProtocolConfig getProtocolConfig() { public void setProtocolConfig(ProtocolConfig protocolConfig) { this.protocolConfig = protocolConfig; } + + private SSLContext createSslContext() { + try { + if (sslCertValue.isPresent()) { + ByteArrayInputStream is = new ByteArrayInputStream(Base64.getDecoder().decode(sslCertValue.get())); + Certificate cert = CertificateFactory.getInstance("X.509").generateCertificate(is); + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + ks.load(null); + ks.setCertificateEntry("arangodb", cert); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(sslAlgorithm.orElseGet(TrustManagerFactory::getDefaultAlgorithm)); + tmf.init(ks); + SSLContext sc = SSLContext.getInstance(sslProtocol); + sc.init(null, tmf.getTrustManagers(), null); + return sc; + } else { + return SSLContext.getDefault(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } diff --git a/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java b/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java index 614119ed3..fad7abd6e 100644 --- a/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java +++ b/core/src/main/java/com/arangodb/internal/config/ArangoConfigPropertiesImpl.java @@ -31,22 +31,22 @@ public ArangoConfigPropertiesImpl(final String fileName) { this(fileName, DEFAULT_PREFIX); } - public ArangoConfigPropertiesImpl(final String fileName, final String prefix) { - properties = initProperties(fileName); - this.prefix = initPrefix(prefix); + public 
ArangoConfigPropertiesImpl(final String fileName, final String prefix) { + this(initProperties(fileName), prefix); } - private String initPrefix(String p) { - if (p == null) { - return ""; - } else { - return p + "."; - } + public ArangoConfigPropertiesImpl(final Properties properties) { + this(properties, DEFAULT_PREFIX); + } + + public ArangoConfigPropertiesImpl(final Properties properties, final String prefix) { + this.properties = properties; + this.prefix = initPrefix(prefix); } - private Properties initProperties(String fileName) { + private static Properties initProperties(String fileName) { Properties p = new Properties(); - try (InputStream is = getClass().getClassLoader().getResourceAsStream(fileName)) { + try (InputStream is = ArangoConfigPropertiesImpl.class.getClassLoader().getResourceAsStream(fileName)) { p.load(is); } catch (Exception e) { throw ArangoDBException.of("Got exception while reading properties file " + fileName, e); @@ -54,13 +54,21 @@ private Properties initProperties(String fileName) { return p; } + private String initPrefix(String p) { + if (p == null) { + return ""; + } else { + return p + "."; + } + } + private String getProperty(String key) { return properties.getProperty(prefix + key); } @Override public Optional> getHosts() { - return Optional.ofNullable(getProperty("hosts")) + return Optional.ofNullable(getProperty(KEY_HOSTS)) .map(s -> { List hostDescriptions = new ArrayList<>(); String[] hosts = s.split(","); @@ -73,97 +81,134 @@ public Optional> getHosts() { @Override public Optional getProtocol() { - return Optional.ofNullable(getProperty("protocol")).map(Protocol::valueOf); + return Optional.ofNullable(getProperty(KEY_PROTOCOL)).map(Protocol::valueOf); } @Override public Optional getUser() { - return Optional.ofNullable(getProperty("user")); + return Optional.ofNullable(getProperty(KEY_USER)); } @Override public Optional getPassword() { - return Optional.ofNullable(getProperty("password")); + return 
Optional.ofNullable(getProperty(KEY_PASSWORD)); } @Override public Optional getJwt() { - return Optional.ofNullable(getProperty("jwt")); + return Optional.ofNullable(getProperty(KEY_JWT)); } @Override public Optional getTimeout() { - return Optional.ofNullable(getProperty("timeout")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_TIMEOUT)).map(Integer::valueOf); } @Override public Optional getUseSsl() { - return Optional.ofNullable(getProperty("useSsl")).map(Boolean::valueOf); + return Optional.ofNullable(getProperty(KEY_USE_SSL)).map(Boolean::valueOf); + } + + @Override + public Optional getSslCertValue() { + return Optional.ofNullable(getProperty(KEY_SSL_CERT_VALUE)); + } + + @Override + public Optional getSslAlgorithm() { + return Optional.ofNullable(getProperty(KEY_SSL_ALGORITHM)); + } + + @Override + public Optional getSslProtocol() { + return Optional.ofNullable(getProperty(KEY_SSL_PROTOCOL)); } @Override public Optional getVerifyHost() { - return Optional.ofNullable(getProperty("verifyHost")).map(Boolean::valueOf); + return Optional.ofNullable(getProperty(KEY_VERIFY_HOST)).map(Boolean::valueOf); } @Override public Optional getChunkSize() { - return Optional.ofNullable(getProperty("chunkSize")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_CHUNK_SIZE)).map(Integer::valueOf); + } + + @Override + public Optional getPipelining() { + return Optional.ofNullable(getProperty(KEY_PIPELINING)).map(Boolean::valueOf); + } + + @Override + public Optional getConnectionWindowSize() { + return Optional.ofNullable(getProperty(KEY_CONNECTION_WINDOW_SIZE)).map(Integer::valueOf); + } + + @Override + public Optional getInitialWindowSize() { + return Optional.ofNullable(getProperty(KEY_INITIAL_WINDOW_SIZE)).map(Integer::valueOf); } @Override public Optional getMaxConnections() { - return Optional.ofNullable(getProperty("maxConnections")).map(Integer::valueOf); + return 
Optional.ofNullable(getProperty(KEY_MAX_CONNECTIONS)).map(Integer::valueOf); } @Override public Optional getConnectionTtl() { - return Optional.ofNullable(getProperty("connectionTtl")).map(Long::valueOf); + return Optional.ofNullable(getProperty(KEY_CONNECTION_TTL)).map(Long::valueOf); } @Override public Optional getKeepAliveInterval() { - return Optional.ofNullable(getProperty("keepAliveInterval")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_KEEP_ALIVE_INTERVAL)).map(Integer::valueOf); } @Override public Optional getAcquireHostList() { - return Optional.ofNullable(getProperty("acquireHostList")).map(Boolean::valueOf); + return Optional.ofNullable(getProperty(KEY_ACQUIRE_HOST_LIST)).map(Boolean::valueOf); } @Override public Optional getAcquireHostListInterval() { - return Optional.ofNullable(getProperty("acquireHostListInterval")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_ACQUIRE_HOST_LIST_INTERVAL)).map(Integer::valueOf); } @Override public Optional getLoadBalancingStrategy() { - return Optional.ofNullable(getProperty("loadBalancingStrategy")).map(LoadBalancingStrategy::valueOf); + return Optional.ofNullable(getProperty(KEY_LOAD_BALANCING_STRATEGY)).map(LoadBalancingStrategy::valueOf); } @Override public Optional getResponseQueueTimeSamples() { - return Optional.ofNullable(getProperty("responseQueueTimeSamples")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_RESPONSE_QUEUE_TIME_SAMPLES)).map(Integer::valueOf); } @Override public Optional getCompression() { - return Optional.ofNullable(getProperty("compression")).map(Compression::valueOf); + return Optional.ofNullable(getProperty(KEY_COMPRESSION)).map(Compression::valueOf); } @Override public Optional getCompressionThreshold() { - return Optional.ofNullable(getProperty("compressionThreshold")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_COMPRESSION_THRESHOLD)).map(Integer::valueOf); } @Override public Optional 
getCompressionLevel() { - return Optional.ofNullable(getProperty("compressionLevel")).map(Integer::valueOf); + return Optional.ofNullable(getProperty(KEY_COMPRESSION_LEVEL)).map(Integer::valueOf); } @Override public Optional getSerdeProviderClass() { - return Optional.ofNullable(getProperty("serdeProviderClass")); + return Optional.ofNullable(getProperty(KEY_SERDE_PROVIDER_CLASS)); } + @Override + public String toString() { + return "ArangoConfigPropertiesImpl{" + + "prefix='" + prefix + '\'' + + ", properties=" + properties + + '}'; + } } diff --git a/core/src/main/java/com/arangodb/internal/net/Communication.java b/core/src/main/java/com/arangodb/internal/net/Communication.java index cf12dd9a3..26251e33d 100644 --- a/core/src/main/java/com/arangodb/internal/net/Communication.java +++ b/core/src/main/java/com/arangodb/internal/net/Communication.java @@ -50,15 +50,16 @@ public CompletableFuture executeAsync(final InternalRequest re private CompletableFuture executeAsync(final InternalRequest request, final HostHandle hostHandle, final Host host, final int attemptCount) { long reqId = reqCount.getAndIncrement(); - return doExecuteAsync(request, hostHandle, host, attemptCount, host.connection(), reqId); + return host.connection().thenCompose(c -> + doExecuteAsync(request, hostHandle, host, attemptCount, c, reqId) + .whenComplete((r, t) -> c.release())); } private CompletableFuture doExecuteAsync( final InternalRequest request, final HostHandle hostHandle, final Host host, final int attemptCount, Connection connection, long reqId ) { if (LOGGER.isDebugEnabled()) { - String body = request.getBody() == null ? 
"" : serde.toJsonString(request.getBody()); - LOGGER.debug("Send Request [id={}]: {} {}", reqId, request, body); + LOGGER.debug("Send Request [id={}]: {} {}", reqId, request, serde.toJsonString(request.getBody())); } final CompletableFuture rfuture = new CompletableFuture<>(); try { @@ -84,8 +85,7 @@ private CompletableFuture doExecuteAsync( handleException(isSafe(request), e, hostHandle, request, host, reqId, attemptCount, rfuture); } else { if (LOGGER.isDebugEnabled()) { - String body = response.getBody() == null ? "" : serde.toJsonString(response.getBody()); - LOGGER.debug("Received Response [id={}]: {} {}", reqId, response, body); + LOGGER.debug("Received Response [id={}]: {} {}", reqId, response, serde.toJsonString(response.getBody())); } ArangoDBException errorEntityEx = ResponseUtils.translateError(serde, response); if (errorEntityEx instanceof ArangoDBRedirectException) { diff --git a/core/src/main/java/com/arangodb/internal/net/Connection.java b/core/src/main/java/com/arangodb/internal/net/Connection.java index b092448d3..461c5ccea 100644 --- a/core/src/main/java/com/arangodb/internal/net/Connection.java +++ b/core/src/main/java/com/arangodb/internal/net/Connection.java @@ -35,4 +35,6 @@ public interface Connection extends Closeable { void setJwt(String jwt); CompletableFuture executeAsync(InternalRequest request); + + void release(); } diff --git a/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java b/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java index b0fbbdf7b..0e01ca824 100644 --- a/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionFactory.java @@ -29,5 +29,5 @@ */ @UsedInApi public interface ConnectionFactory { - Connection create(ArangoConfig config, HostDescription host); + Connection create(ArangoConfig config, HostDescription host, ConnectionPool pool); } diff --git a/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java 
b/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java index 91c12bb02..0db87c0c3 100644 --- a/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionPool.java @@ -20,18 +20,22 @@ package com.arangodb.internal.net; -import com.arangodb.config.HostDescription; +import com.arangodb.arch.UsedInApi; import java.io.Closeable; +import java.util.concurrent.CompletableFuture; /** * @author Mark Vollmary */ +@UsedInApi public interface ConnectionPool extends Closeable { - Connection createConnection(final HostDescription host); + Connection createConnection(); - Connection connection(); + CompletableFuture connection(); + + void release(final Connection connection); void setJwt(String jwt); diff --git a/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java b/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java index 8337a67bf..9f22ee50a 100644 --- a/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java +++ b/core/src/main/java/com/arangodb/internal/net/ConnectionPoolImpl.java @@ -23,24 +23,28 @@ import com.arangodb.ArangoDBException; import com.arangodb.config.HostDescription; import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.util.AsyncQueue; import java.io.IOException; -import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CopyOnWriteArrayList; -/** - * @author Mark Vollmary - */ public class ConnectionPoolImpl implements ConnectionPool { + public static final int HTTP1_SLOTS = 1; // HTTP/1: max 1 pending request + public static final int HTTP1_SLOTS_PIPELINING = 10; // HTTP/1: max pipelining + public static final int HTTP2_SLOTS = 32; // HTTP/2: max streams, hard-coded see BTS-2049 + + private final AsyncQueue slots = new AsyncQueue<>(); private final HostDescription host; private final ArangoConfig config; private final int maxConnections; 
private final List connections; private final ConnectionFactory factory; - private int current; + private final int maxSlots; private volatile String jwt = null; - private boolean closed = false; + private volatile boolean closed = false; public ConnectionPoolImpl(final HostDescription host, final ArangoConfig config, final ConnectionFactory factory) { super(); @@ -48,39 +52,48 @@ public ConnectionPoolImpl(final HostDescription host, final ArangoConfig config, this.config = config; this.maxConnections = config.getMaxConnections(); this.factory = factory; - connections = new ArrayList<>(); - current = 0; + connections = new CopyOnWriteArrayList<>(); + switch (config.getProtocol()) { + case HTTP_JSON: + case HTTP_VPACK: + maxSlots = config.getPipelining() ? HTTP1_SLOTS_PIPELINING : HTTP1_SLOTS; + break; + default: + maxSlots = HTTP2_SLOTS; + } } @Override - public Connection createConnection(final HostDescription host) { - Connection c = factory.create(config, host); + public Connection createConnection() { + Connection c = factory.create(config, host, this); c.setJwt(jwt); return c; } @Override - public synchronized Connection connection() { + public CompletableFuture connection() { if (closed) { throw new ArangoDBException("Connection pool already closed!"); } - final Connection connection; - if (connections.size() < maxConnections) { - connection = createConnection(host); + Connection connection = createConnection(); connections.add(connection); - current++; - } else { - final int index = Math.floorMod(current++, connections.size()); - connection = connections.get(index); + for (int i = 0; i < maxSlots; i++) { + slots.offer((connection)); + } } - return connection; + return slots.poll(); + } + + @Override + public void release(Connection connection) { + slots.offer(connection); } @Override - public synchronized void setJwt(String jwt) { + public void setJwt(String jwt) { if (jwt != null) { this.jwt = jwt; for (Connection connection : connections) { @@ -90,18 
+103,17 @@ public synchronized void setJwt(String jwt) { } @Override - public synchronized void close() throws IOException { + public void close() throws IOException { closed = true; for (final Connection connection : connections) { connection.close(); } - connections.clear(); } @Override public String toString() { return "ConnectionPoolImpl [host=" + host + ", maxConnections=" + maxConnections + ", connections=" - + connections.size() + ", current=" + current + ", factory=" + factory.getClass().getSimpleName() + "]"; + + connections.size() + ", factory=" + factory.getClass().getSimpleName() + "]"; } } diff --git a/core/src/main/java/com/arangodb/internal/net/Host.java b/core/src/main/java/com/arangodb/internal/net/Host.java index 07fd3c6ee..b2afdd8e1 100644 --- a/core/src/main/java/com/arangodb/internal/net/Host.java +++ b/core/src/main/java/com/arangodb/internal/net/Host.java @@ -24,6 +24,7 @@ import com.arangodb.config.HostDescription; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Mark Vollmary @@ -33,9 +34,7 @@ public interface Host { HostDescription getDescription(); - Connection connection(); - - void closeOnError(); + CompletableFuture connection(); void close() throws IOException; @@ -44,5 +43,4 @@ public interface Host { void setMarkforDeletion(boolean markforDeletion); void setJwt(String jwt); - } diff --git a/core/src/main/java/com/arangodb/internal/net/HostImpl.java b/core/src/main/java/com/arangodb/internal/net/HostImpl.java index 1ef822618..0277f8246 100644 --- a/core/src/main/java/com/arangodb/internal/net/HostImpl.java +++ b/core/src/main/java/com/arangodb/internal/net/HostImpl.java @@ -20,10 +20,10 @@ package com.arangodb.internal.net; -import com.arangodb.ArangoDBException; import com.arangodb.config.HostDescription; import java.io.IOException; +import java.util.concurrent.CompletableFuture; /** * @author Mark Vollmary @@ -51,19 +51,10 @@ public HostDescription getDescription() { } @Override - public 
Connection connection() { + public CompletableFuture connection() { return connectionPool.connection(); } - @Override - public void closeOnError() { - try { - connectionPool.close(); - } catch (final IOException e) { - throw ArangoDBException.of(e); - } - } - @Override public String toString() { return "HostImpl [connectionPool=" + connectionPool + ", description=" + description + ", markforDeletion=" diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java b/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java index 6062a86a1..20b3ce3b7 100644 --- a/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java +++ b/core/src/main/java/com/arangodb/internal/serde/InternalDeserializers.java @@ -9,6 +9,7 @@ import com.arangodb.util.RawBytes; import com.arangodb.util.RawJson; import com.arangodb.internal.InternalResponse; +import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.TreeNode; @@ -17,8 +18,9 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.*; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.StringWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; @@ -29,20 +31,23 @@ public final class InternalDeserializers { static final JsonDeserializer RAW_JSON_DESERIALIZER = new JsonDeserializer() { @Override public RawJson deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { - // TODO: find a way to access raw bytes directly - return RawJson.of(SerdeUtils.INSTANCE.writeJson(p.readValueAsTree())); + if (JsonFactory.FORMAT_NAME_JSON.equals(p.getCodec().getFactory().getFormatName())) { + return RawJson.of(new String(SerdeUtils.extractBytes(p), StandardCharsets.UTF_8)); + } else { + StringWriter w = new 
StringWriter(); + try (JsonGenerator gen = SerdeUtils.INSTANCE.getJsonMapper().getFactory().createGenerator(w)) { + gen.copyCurrentStructure(p); + gen.flush(); + } + return RawJson.of(w.toString()); + } } }; static final JsonDeserializer RAW_BYTES_DESERIALIZER = new JsonDeserializer() { @Override public RawBytes deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { - // TODO: find a way to access raw bytes directly - ByteArrayOutputStream os = new ByteArrayOutputStream(); - try (JsonGenerator g = p.getCodec().getFactory().createGenerator(os)) { - g.writeTree(p.readValueAsTree()); - } - return RawBytes.of(os.toByteArray()); + return RawBytes.of(SerdeUtils.extractBytes(p)); } }; @@ -150,5 +155,4 @@ public String deserialize(JsonParser p, DeserializationContext ctxt) throws IOEx } } - } diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalModule.java b/core/src/main/java/com/arangodb/internal/serde/InternalModule.java index 3aeae3c24..392a9c334 100644 --- a/core/src/main/java/com/arangodb/internal/serde/InternalModule.java +++ b/core/src/main/java/com/arangodb/internal/serde/InternalModule.java @@ -3,6 +3,7 @@ import com.arangodb.entity.CollectionStatus; import com.arangodb.entity.CollectionType; import com.arangodb.entity.InvertedIndexPrimarySort; +import com.arangodb.entity.MultiDocumentEntity; import com.arangodb.entity.ReplicationFactor; import com.arangodb.util.RawBytes; import com.arangodb.util.RawJson; @@ -11,18 +12,14 @@ import com.fasterxml.jackson.databind.Module; import com.fasterxml.jackson.databind.module.SimpleModule; -import java.util.function.Supplier; +class InternalModule { -enum InternalModule implements Supplier { - INSTANCE; + static Module get(InternalSerde serde) { + SimpleModule module = new SimpleModule(); - private final SimpleModule module; - - InternalModule() { - module = new SimpleModule(); + module.addDeserializer(MultiDocumentEntity.class, new MultiDocumentEntityDeserializer(serde)); 
module.addSerializer(RawJson.class, InternalSerializers.RAW_JSON_SERIALIZER); - module.addSerializer(RawBytes.class, InternalSerializers.RAW_BYTES_SERIALIZER); module.addSerializer(InternalRequest.class, InternalSerializers.REQUEST); module.addSerializer(CollectionType.class, InternalSerializers.COLLECTION_TYPE); @@ -33,11 +30,7 @@ enum InternalModule implements Supplier { module.addDeserializer(ReplicationFactor.class, InternalDeserializers.REPLICATION_FACTOR); module.addDeserializer(InternalResponse.class, InternalDeserializers.RESPONSE); module.addDeserializer(InvertedIndexPrimarySort.Field.class, InternalDeserializers.INVERTED_INDEX_PRIMARY_SORT_FIELD); - } - @Override - public Module get() { return module; } - } diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java index 758cc550a..1459e9970 100644 --- a/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerde.java @@ -3,6 +3,7 @@ import com.arangodb.arch.UsedInApi; import com.arangodb.serde.ArangoSerde; import com.arangodb.ContentType; +import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.JsonNode; import java.lang.reflect.Type; @@ -15,6 +16,7 @@ public interface InternalSerde extends ArangoSerde { * * @param content byte array * @return JSON string + * @implSpec return {@code "[Unparsable data]"} in case of parsing exception */ String toJsonString(byte[] content); @@ -58,14 +60,6 @@ default T deserialize(JsonNode node, Class clazz) { */ T deserialize(JsonNode node, Type type); - /** - * Parses the content. - * - * @param content VPack or byte encoded JSON string - * @return root of the parsed tree - */ - JsonNode parse(byte[] content); - /** * Parses the content at json pointer. 
* @@ -98,7 +92,7 @@ default T deserialize(byte[] content, String jsonPointer, Class clazz) { * @return deserialized object */ default T deserialize(byte[] content, String jsonPointer, Type type) { - return deserialize(parse(content, jsonPointer), type); + return deserialize(extract(content, jsonPointer), type); } /** @@ -130,30 +124,17 @@ default T deserialize(byte[] content, String jsonPointer, Type type) { * Deserializes the content and binds it to the target data type, using the user serde. * * @param content byte array to deserialize - * @param type target data type + * @param clazz class of target data type * @return deserialized object */ - T deserializeUserData(byte[] content, Type type); + T deserializeUserData(byte[] content, JavaType clazz); - /** - * Deserializes the parsed json node and binds it to the target data type, using the user serde. - * - * @param node parsed json node - * @param clazz class of target data type - * @return deserialized object - */ - default T deserializeUserData(JsonNode node, Class clazz) { - return deserializeUserData(node, (Type) clazz); - } /** - * Deserializes the parsed json node and binds it to the target data type, using the user serde. - * - * @param node parsed json node - * @param type target data type - * @return deserialized object + * @param content byte array to deserialize + * @return whether the content represents a document (i.e. 
it has at least one field name equal to _id, _key, _rev) */ - T deserializeUserData(JsonNode node, Type type); + boolean isDocument(byte[] content); /** * @return the user serde diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java index 829563155..8bd24ba31 100644 --- a/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerdeImpl.java @@ -1,28 +1,37 @@ package com.arangodb.internal.serde; import com.arangodb.ArangoDBException; -import com.arangodb.entity.BaseDocument; -import com.arangodb.entity.BaseEdgeDocument; import com.arangodb.internal.RequestContextHolder; import com.arangodb.serde.ArangoSerde; import com.arangodb.util.RawBytes; import com.arangodb.util.RawJson; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonMappingException; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.Module; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.datatype.jsonp.JSONPModule; +import jakarta.json.JsonException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.lang.reflect.Type; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; +import java.nio.charset.StandardCharsets; import static com.arangodb.internal.serde.SerdeUtils.checkSupportedJacksonVersion; +import static 
com.arangodb.internal.serde.SerdeUtils.extractBytes; final class InternalSerdeImpl implements InternalSerde { + private static final Logger LOG = LoggerFactory.getLogger(InternalSerdeImpl.class); static { checkSupportedJacksonVersion(); @@ -36,7 +45,8 @@ final class InternalSerdeImpl implements InternalSerde { this.userSerde = userSerde; mapper.deactivateDefaultTyping(); mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); - mapper.registerModule(InternalModule.INSTANCE.get()); + mapper.enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION); + mapper.registerModule(InternalModule.get(this)); if (protocolModule != null) { mapper.registerModule(protocolModule); } @@ -45,6 +55,13 @@ final class InternalSerdeImpl implements InternalSerde { new UserDataSerializer(this), new UserDataDeserializer(this) )); + + // JSON-P datatypes + try { + mapper.registerModule(new JSONPModule()); + } catch (JsonException e) { + LOG.debug("Jakarta JSON-P provider not found, handling of JSON-P datatypes is disabled", e); + } } @Override @@ -63,27 +80,48 @@ public T deserialize(byte[] content, Class clazz) { @Override public String toJsonString(final byte[] content) { + if (content == null) { + return ""; + } try { return SerdeUtils.INSTANCE.writeJson(mapper.readTree(content)); - } catch (IOException e) { - throw ArangoDBException.of(e); + } catch (Exception e) { + return "[Unparsable data]"; } } @Override public byte[] extract(final byte[] content, final String jsonPointer) { - try { - JsonNode target = parse(content).at(jsonPointer); - return mapper.writeValueAsBytes(target); - } catch (IOException e) { - throw ArangoDBException.of(e); + if (!jsonPointer.startsWith("/")) { + throw new ArangoDBException("Unsupported JSON pointer: " + jsonPointer); } - } - - @Override - public JsonNode parse(byte[] content) { - try { - return mapper.readTree(content); + String[] parts = jsonPointer.substring(1).split("/"); + try (JsonParser parser = 
mapper.getFactory().createParser(content)) { + int match = 0; + int level = 0; + JsonToken token = parser.nextToken(); + if (token != JsonToken.START_OBJECT) { + throw new ArangoDBException("Unable to parse token: " + token); + } + while (true) { + token = parser.nextToken(); + if (token == JsonToken.START_OBJECT) { + level++; + } + if (token == JsonToken.END_OBJECT) { + level--; + } + if (token == null || level < match) { + throw new ArangoDBException("Unable to parse JSON pointer: " + jsonPointer); + } + if (token == JsonToken.FIELD_NAME && match == level && parts[match].equals(parser.getText())) { + match++; + if (match == parts.length) { + parser.nextToken(); + return extractBytes(parser); + } + } + } } catch (IOException e) { throw ArangoDBException.of(e); } @@ -104,7 +142,11 @@ public byte[] serializeUserData(Object value) { return serialize(null); } Class clazz = value.getClass(); - if (isManagedClass(clazz)) { + if (RawBytes.class.equals(clazz)) { + return ((RawBytes) value).get(); + } else if (RawJson.class.equals(clazz) && JsonFactory.FORMAT_NAME_JSON.equals(mapper.getFactory().getFormatName())) { + return ((RawJson) value).get().getBytes(StandardCharsets.UTF_8); + } else if (SerdeUtils.isManagedClass(clazz)) { return serialize(value); } else { return userSerde.serialize(value); @@ -113,16 +155,23 @@ public byte[] serializeUserData(Object value) { @Override public byte[] serializeCollectionUserData(Iterable value) { - List jsonNodeCollection = StreamSupport.stream(value.spliterator(), false) - .map(this::serializeUserData) - .map(this::parse) - .collect(Collectors.toList()); - return serialize(jsonNodeCollection); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + try (JsonGenerator gen = mapper.getFactory().createGenerator(os)) { + gen.writeStartArray(); + for (Object o : value) { + gen.writeRawValue(new RawUserDataValue(serializeUserData(o))); + } + gen.writeEndArray(); + gen.flush(); + } catch (IOException e) { + throw ArangoDBException.of(e); 
+ } + return os.toByteArray(); } @Override public T deserializeUserData(byte[] content, Class clazz) { - if (isManagedClass(clazz)) { + if (SerdeUtils.isManagedClass(clazz)) { return deserialize(content, clazz); } else { return userSerde.deserialize(content, clazz, RequestContextHolder.INSTANCE.getCtx()); @@ -131,17 +180,48 @@ public T deserializeUserData(byte[] content, Class clazz) { @Override @SuppressWarnings("unchecked") - public T deserializeUserData(byte[] content, Type type) { - if (type instanceof Class) { - return deserializeUserData(content, (Class) type); - } else { - throw new UnsupportedOperationException(); + public T deserializeUserData(byte[] content, JavaType clazz) { + try { + if (SerdeUtils.isManagedClass(clazz.getRawClass())) { + return mapper.readerFor(clazz).readValue(content); + } else { + return deserializeUserData(content, (Class) clazz.getRawClass()); + } + } catch (IOException e) { + throw ArangoDBException.of(e); } } @Override - public T deserializeUserData(JsonNode node, Type type) { - return deserializeUserData(serialize(node), type); + public boolean isDocument(byte[] content) { + try (JsonParser p = mapper.getFactory().createParser(content)) { + if (p.nextToken() != JsonToken.START_OBJECT) { + return false; + } + + int level = 1; + while (level >= 1) { + JsonToken t = p.nextToken(); + if (level == 1 && t == JsonToken.FIELD_NAME) { + String fieldName = p.getText(); + if (fieldName.equals("_id") || fieldName.equals("_key") || fieldName.equals("_rev")) { + return true; + } + } + if (t.isStructStart()) { + level++; + } else if (t.isStructEnd()) { + level--; + } + } + + if (p.currentToken() != JsonToken.END_OBJECT) { + throw new JsonMappingException(p, "Expected END_OBJECT but got " + p.currentToken()); + } + } catch (IOException e) { + throw ArangoDBException.of(e); + } + return false; } @Override @@ -159,31 +239,22 @@ public T deserialize(final JsonNode node, final Type type) { } @Override + @SuppressWarnings("unchecked") public T 
deserialize(final byte[] content, final Type type) { if (content == null || content.length == 0) { return null; } - try { - return mapper.readerFor(mapper.constructType(type)).readValue(content); - } catch (IOException e) { - throw ArangoDBException.of(e); + if (RawBytes.class.equals(type)) { + return (T) RawBytes.of(content); + } else if (RawJson.class.equals(type) && JsonFactory.FORMAT_NAME_JSON.equals(mapper.getFactory().getFormatName())) { + return (T) RawJson.of(new String(content, StandardCharsets.UTF_8)); + } else { + try { + return mapper.readerFor(mapper.constructType(type)).readValue(content); + } catch (IOException e) { + throw ArangoDBException.of(e); + } } } - private boolean isManagedClass(Class clazz) { - return JsonNode.class.isAssignableFrom(clazz) || - RawJson.class.equals(clazz) || - RawBytes.class.equals(clazz) || - BaseDocument.class.equals(clazz) || - BaseEdgeDocument.class.equals(clazz) || - isEntityClass(clazz); - } - - private boolean isEntityClass(Class clazz) { - Package pkg = clazz.getPackage(); - if (pkg == null) { - return false; - } - return pkg.getName().startsWith("com.arangodb.entity"); - } } diff --git a/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java b/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java index fa746fedd..db156dff8 100644 --- a/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java +++ b/core/src/main/java/com/arangodb/internal/serde/InternalSerializers.java @@ -4,15 +4,16 @@ import com.arangodb.entity.arangosearch.CollectionLink; import com.arangodb.entity.arangosearch.FieldLink; import com.arangodb.internal.ArangoRequestParam; -import com.arangodb.util.RawBytes; import com.arangodb.util.RawJson; import com.arangodb.internal.InternalRequest; +import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.databind.JsonSerializer; import 
com.fasterxml.jackson.databind.SerializerProvider; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -23,16 +24,13 @@ public final class InternalSerializers { static final JsonSerializer RAW_JSON_SERIALIZER = new JsonSerializer() { @Override public void serialize(RawJson value, JsonGenerator gen, SerializerProvider serializers) throws IOException { - gen.writeTree(SerdeUtils.INSTANCE.parseJson(value.get())); - } - }; - static final JsonSerializer RAW_BYTES_SERIALIZER = new JsonSerializer() { - @Override - public void serialize(RawBytes value, JsonGenerator gen, SerializerProvider serializers) throws IOException { - // TODO: find a way to append raw bytes directly - // see https://github.com/FasterXML/jackson-core/issues/914 - try (JsonParser parser = gen.getCodec().getFactory().createParser(value.get())) { - gen.writeTree(parser.readValueAsTree()); + if (JsonFactory.FORMAT_NAME_JSON.equals(gen.getCodec().getFactory().getFormatName())) { + gen.writeRawValue(new RawUserDataValue(value.get().getBytes(StandardCharsets.UTF_8))); + } else { + try (JsonParser parser = SerdeUtils.INSTANCE.getJsonMapper().getFactory().createParser(value.get())) { + parser.nextToken(); + gen.copyCurrentStructure(parser); + } } } }; diff --git a/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java b/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java new file mode 100644 index 000000000..ca650569d --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/MultiDocumentEntityDeserializer.java @@ -0,0 +1,69 @@ +package com.arangodb.internal.serde; + +import com.arangodb.entity.ErrorEntity; +import com.arangodb.entity.MultiDocumentEntity; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.BeanProperty; +import 
com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JavaType; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.deser.ContextualDeserializer; + +import java.io.IOException; + +public class MultiDocumentEntityDeserializer extends JsonDeserializer> implements ContextualDeserializer { + private final JavaType containedType; + private final InternalSerde serde; + + MultiDocumentEntityDeserializer(InternalSerde serde) { + this(serde, null); + } + + MultiDocumentEntityDeserializer(InternalSerde serde, JavaType containedType) { + this.serde = serde; + this.containedType = containedType; + } + + @Override + public MultiDocumentEntity deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + MultiDocumentEntity multiDocument = new MultiDocumentEntity<>(); + + // silent=true returns an empty object + if (p.currentToken() == JsonToken.START_OBJECT) { + if (p.nextToken() == JsonToken.END_OBJECT) { + return multiDocument; + } else { + throw new JsonMappingException(p, "Unexpected token sequence: START_OBJECT, " + p.currentToken()); + } + } + + if (p.currentToken() != JsonToken.START_ARRAY) { + throw new JsonMappingException(p, "Expected START_ARRAY but got " + p.currentToken()); + } + p.nextToken(); + while (p.currentToken() != JsonToken.END_ARRAY) { + if (p.currentToken() != JsonToken.START_OBJECT) { + throw new JsonMappingException(p, "Expected START_OBJECT but got " + p.currentToken()); + } + byte[] element = SerdeUtils.extractBytes(p); + if (serde.isDocument(element)) { + Object d = serde.deserializeUserData(element, containedType); + multiDocument.getDocuments().add(d); + multiDocument.getDocumentsAndErrors().add(d); + } else { + ErrorEntity e = serde.deserialize(element, ErrorEntity.class); + multiDocument.getErrors().add(e); + multiDocument.getDocumentsAndErrors().add(e); + } + p.nextToken(); // 
END_OBJECT + } + return multiDocument; + } + + @Override + public JsonDeserializer createContextual(DeserializationContext ctxt, BeanProperty property) { + return new MultiDocumentEntityDeserializer(serde, ctxt.getContextualType().containedType(0)); + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java b/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java new file mode 100644 index 000000000..4bfde90f2 --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/serde/RawUserDataValue.java @@ -0,0 +1,92 @@ +package com.arangodb.internal.serde; + +import com.fasterxml.jackson.core.SerializableString; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +class RawUserDataValue implements SerializableString { + private final byte[] data; + + RawUserDataValue(byte[] data) { + this.data = data; + } + + @Override + public String getValue() { + throw new UnsupportedOperationException(); + } + + @Override + public int charLength() { + throw new UnsupportedOperationException(); + } + + @Override + public char[] asQuotedChars() { + throw new UnsupportedOperationException(); + } + + @Override + public byte[] asUnquotedUTF8() { + return data; + } + + @Override + public byte[] asQuotedUTF8() { + throw new UnsupportedOperationException(); + } + + @Override + public int appendQuotedUTF8(byte[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int appendQuoted(char[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int appendUnquotedUTF8(byte[] buffer, int offset) { + final int length = data.length; + if ((offset + length) > buffer.length) { + return -1; + } + System.arraycopy(data, 0, buffer, offset, length); + return length; + } + + @Override + public int appendUnquoted(char[] buffer, int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public int writeQuotedUTF8(OutputStream 
out) { + throw new UnsupportedOperationException(); + } + + @Override + public int writeUnquotedUTF8(OutputStream out) throws IOException { + final int length = data.length; + out.write(data, 0, length); + return length; + } + + @Override + public int putQuotedUTF8(ByteBuffer buffer) { + throw new UnsupportedOperationException(); + } + + @Override + public int putUnquotedUTF8(ByteBuffer buffer) { + final int length = data.length; + if (length > buffer.remaining()) { + return -1; + } + buffer.put(data, 0, length); + return length; + } +} diff --git a/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java b/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java index 1ac00b7e0..2d7ad5935 100644 --- a/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java +++ b/core/src/main/java/com/arangodb/internal/serde/SerdeUtils.java @@ -1,14 +1,23 @@ package com.arangodb.internal.serde; import com.arangodb.ArangoDBException; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.BaseEdgeDocument; +import com.arangodb.util.RawBytes; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.JsonToken; import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.type.TypeFactory; +import jakarta.json.JsonValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.lang.reflect.Type; import java.util.ArrayList; import java.util.Arrays; @@ -49,12 +58,16 @@ static void checkSupportedJacksonVersion() { ).forEach(version -> { int major = version.getMajorVersion(); int minor = version.getMinorVersion(); - if (major != 2 || minor < 10 || minor > 17) { + if (major != 2 || minor < 10 || minor > 20) { 
LOGGER.warn("Unsupported Jackson version: {}", version); } }); } + public ObjectMapper getJsonMapper() { + return jsonMapper; + } + /** * Parse a JSON string. * @@ -81,4 +94,54 @@ public String writeJson(final JsonNode data) { } } + /** + * Extract raw bytes for the current JSON (or VPACK) node + * + * @param parser JsonParser with current token pointing to the node to extract + * @return byte array + */ + @SuppressWarnings("deprecation") + public static byte[] extractBytes(JsonParser parser) throws IOException { + JsonToken t = parser.currentToken(); + if (t.isStructEnd() || t == JsonToken.FIELD_NAME) { + throw new ArangoDBException("Unexpected token: " + t); + } + byte[] data = (byte[]) parser.getTokenLocation().getSourceRef(); + int start = (int) parser.getTokenLocation().getByteOffset(); + int end = (int) parser.getCurrentLocation().getByteOffset(); + if (t.isStructStart()) { + int open = 1; + while (open > 0) { + t = parser.nextToken(); + if (t.isStructStart()) { + open++; + } else if (t.isStructEnd()) { + open--; + } + } + } + parser.finishToken(); + if (JsonFactory.FORMAT_NAME_JSON.equals(parser.getCodec().getFactory().getFormatName())) { + end = (int) parser.getCurrentLocation().getByteOffset(); + } + return Arrays.copyOfRange(data, start, end); + } + + public static boolean isManagedClass(Class clazz) { + return JsonNode.class.isAssignableFrom(clazz) || // jackson datatypes + JsonValue.class.isAssignableFrom(clazz) || // JSON-B datatypes + RawJson.class.equals(clazz) || + RawBytes.class.equals(clazz) || + BaseDocument.class.equals(clazz) || + BaseEdgeDocument.class.equals(clazz) || + isEntityClass(clazz); + } + + private static boolean isEntityClass(Class clazz) { + Package pkg = clazz.getPackage(); + if (pkg == null) { + return false; + } + return pkg.getName().startsWith("com.arangodb.entity"); + } } diff --git a/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java 
b/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java index 91220088b..ecb8c83f3 100644 --- a/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java +++ b/core/src/main/java/com/arangodb/internal/serde/UserDataDeserializer.java @@ -29,7 +29,12 @@ private UserDataDeserializer(final JavaType targetType, final InternalSerde serd @Override public Object deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { - return serde.deserializeUserData(p.readValueAsTree(), targetType); + Class clazz = (Class) targetType; + if (SerdeUtils.isManagedClass(clazz)) { + return p.readValueAs(clazz); + } else { + return serde.deserializeUserData(SerdeUtils.extractBytes(p), clazz); + } } @Override @@ -41,4 +46,5 @@ public Object deserializeWithType(JsonParser p, DeserializationContext ctxt, Typ public JsonDeserializer createContextual(DeserializationContext ctxt, BeanProperty property) { return new UserDataDeserializer(ctxt.getContextualType(), serde); } + } diff --git a/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java b/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java index d9a6acb30..501998da4 100644 --- a/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java +++ b/core/src/main/java/com/arangodb/internal/serde/UserDataSerializer.java @@ -1,7 +1,7 @@ package com.arangodb.internal.serde; import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.SerializerProvider; @@ -16,10 +16,10 @@ class UserDataSerializer extends JsonSerializer { @Override public void serialize(Object value, JsonGenerator gen, SerializerProvider serializers) throws IOException { - // TODO: find a way to append raw bytes directly - // see https://github.com/FasterXML/jackson-core/issues/914 - try (JsonParser parser = 
gen.getCodec().getFactory().createParser(serde.serializeUserData(value))) { - gen.writeTree(parser.readValueAsTree()); + if (value != null && JsonNode.class.isAssignableFrom(value.getClass())) { + gen.writeTree((JsonNode) value); + } else { + gen.writeRawValue(new RawUserDataValue(serde.serializeUserData(value))); } } } diff --git a/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java b/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java new file mode 100644 index 000000000..d3b1a223a --- /dev/null +++ b/core/src/main/java/com/arangodb/internal/util/AsyncQueue.java @@ -0,0 +1,45 @@ +package com.arangodb.internal.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayDeque; +import java.util.Queue; +import java.util.concurrent.*; + +public class AsyncQueue { + private static final Logger LOGGER = LoggerFactory.getLogger(AsyncQueue.class); + private final Queue> requests = new ConcurrentLinkedQueue<>(); + private final Queue offers = new ArrayDeque<>(); + + public synchronized CompletableFuture poll() { + LOGGER.trace("poll()"); + T o = offers.poll(); + if (o != null) { + LOGGER.trace("poll(): short-circuit: {}", o); + return CompletableFuture.completedFuture(o); + } + CompletableFuture r = new CompletableFuture<>(); + LOGGER.trace("poll(): enqueue request: {}", r); + requests.add(r); + return r; + } + + public void offer(T o) { + LOGGER.trace("offer({})", o); + CompletableFuture r = requests.poll(); + if (r == null) { + synchronized (this) { + r = requests.poll(); + if (r == null) { + LOGGER.trace("offer({}): enqueue", o); + offers.add(o); + } + } + } + if (r != null) { + LOGGER.trace("offer({}): short-circuit: {}", o, r); + r.complete(o); + } + } +} diff --git a/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java b/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java index 4a927b878..57b69c319 100644 --- a/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java +++ 
b/core/src/main/java/com/arangodb/internal/util/ResponseUtils.java @@ -28,6 +28,8 @@ import com.arangodb.internal.net.ArangoDBUnavailableException; import com.arangodb.internal.serde.InternalSerde; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.concurrent.TimeoutException; /** @@ -38,12 +40,14 @@ public final class ResponseUtils { private static final int ERROR_STATUS = 300; private static final int ERROR_INTERNAL = 503; private static final String HEADER_ENDPOINT = "x-arango-endpoint"; + private static final String CONTENT_TYPE = "content-type"; + private static final String TEXT_PLAIN = "text/plain"; private ResponseUtils() { super(); } - public static ArangoDBException translateError(final InternalSerde util, final InternalResponse response) { + public static ArangoDBException translateError(InternalSerde serde, InternalResponse response) { final int responseCode = response.getResponseCode(); if (responseCode < ERROR_STATUS) { return null; @@ -52,17 +56,49 @@ public static ArangoDBException translateError(final InternalSerde util, final I return new ArangoDBRedirectException(String.format("Response Code: %s", responseCode), response.getMeta(HEADER_ENDPOINT)); } - if (response.getBody() != null) { - final ErrorEntity errorEntity = util.deserialize(response.getBody(), ErrorEntity.class); - if (errorEntity.getCode() == ERROR_INTERNAL && errorEntity.getErrorNum() == ERROR_INTERNAL) { - return ArangoDBUnavailableException.from(errorEntity); - } - ArangoDBException e = new ArangoDBException(errorEntity); - if (ArangoErrors.QUEUE_TIME_VIOLATED.equals(e.getErrorNum())) { - return ArangoDBException.of(new TimeoutException().initCause(e)); - } - return e; + + byte[] body = response.getBody(); + if (body == null) { + return new ArangoDBException(String.format("Response Code: %s", responseCode), responseCode); + } + + if (isTextPlain(response)) { + String payload = new String(body, getContentTypeCharset(response)); + return new 
ArangoDBException("Response Code: " + responseCode + "[" + payload + "]", responseCode); + } + + ErrorEntity errorEntity; + try { + errorEntity = serde.deserialize(body, ErrorEntity.class); + } catch (Exception e) { + ArangoDBException adbEx = new ArangoDBException("Response Code: " + responseCode + + " [Unparsable data] Response: " + response, responseCode); + adbEx.addSuppressed(e); + return adbEx; + } + + if (errorEntity.getCode() == ERROR_INTERNAL && errorEntity.getErrorNum() == ERROR_INTERNAL) { + return ArangoDBUnavailableException.from(errorEntity); } - return new ArangoDBException(String.format("Response Code: %s", responseCode), responseCode); + ArangoDBException e = new ArangoDBException(errorEntity); + if (ArangoErrors.QUEUE_TIME_VIOLATED.equals(e.getErrorNum())) { + return ArangoDBException.of(new TimeoutException().initCause(e)); + } + return e; + } + + private static boolean isTextPlain(InternalResponse response) { + String contentType = response.getMeta(CONTENT_TYPE); + return contentType != null && contentType.startsWith(TEXT_PLAIN); } + + private static Charset getContentTypeCharset(InternalResponse response) { + String contentType = response.getMeta(CONTENT_TYPE); + int paramIdx = contentType.indexOf("charset="); + if (paramIdx == -1) { + return StandardCharsets.UTF_8; + } + return Charset.forName(contentType.substring(paramIdx + 8)); + } + } diff --git a/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java index a09889378..ac4d1d161 100644 --- a/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java +++ b/core/src/main/java/com/arangodb/model/AqlQueryExplainOptions.java @@ -20,6 +20,7 @@ package com.arangodb.model; +import com.arangodb.ArangoDatabase; import com.arangodb.internal.serde.UserDataInside; import java.util.Collection; @@ -28,10 +29,13 @@ /** * @author Mark Vollmary * @author Michele Rastelli + * + * @deprecated for removal, use {@link 
ArangoDatabase#explainAqlQuery(String, Map, ExplainAqlQueryOptions)} instead */ +@Deprecated public final class AqlQueryExplainOptions { - private Map bindVars; + private Map bindVars; private String query; private Options options; @@ -40,7 +44,7 @@ public AqlQueryExplainOptions() { } @UserDataInside - public Map getBindVars() { + public Map getBindVars() { return bindVars; } @@ -48,7 +52,7 @@ public Map getBindVars() { * @param bindVars key/value pairs representing the bind parameters * @return options */ - AqlQueryExplainOptions bindVars(final Map bindVars) { + AqlQueryExplainOptions bindVars(final Map bindVars) { this.bindVars = bindVars; return this; } @@ -110,7 +114,7 @@ public AqlQueryExplainOptions rules(final Collection rules) { return this; } - private Options getOptions() { + public Options getOptions() { if (options == null) { options = new Options(); } @@ -128,9 +132,21 @@ public Optimizer getOptimizer() { } return optimizer; } + + public Integer getMaxNumberOfPlans() { + return maxNumberOfPlans; + } + + public Boolean getAllPlans() { + return allPlans; + } } public static final class Optimizer { private Collection rules; + + public Collection getRules() { + return rules; + } } } diff --git a/core/src/main/java/com/arangodb/model/AqlQueryOptions.java b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java index 91c551d14..ccf217a16 100644 --- a/core/src/main/java/com/arangodb/model/AqlQueryOptions.java +++ b/core/src/main/java/com/arangodb/model/AqlQueryOptions.java @@ -20,7 +20,11 @@ package com.arangodb.model; +import com.arangodb.ArangoCursor; import com.arangodb.internal.serde.UserDataInside; +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; import java.util.*; @@ -30,49 +34,40 @@ */ public final class AqlQueryOptions extends TransactionalOptions implements Cloneable { - private Boolean count; - private Integer ttl; + private Boolean 
allowDirtyRead; private Integer batchSize; + private Map bindVars; private Boolean cache; + private Boolean count; private Long memoryLimit; - private Map bindVars; - private String query; private Options options; - private Boolean allowDirtyRead; + private String query; + private Integer ttl; @Override AqlQueryOptions getThis() { return this; } - public Boolean getCount() { - return count; - } - - /** - * @param count indicates whether the number of documents in the result set should be returned in the "count" - * attribute of the result. Calculating the "count" attribute might have a performance impact for some - * queries in the future so this option is turned off by default, and "count" is only returned when - * requested. - * @return options - */ - public AqlQueryOptions count(final Boolean count) { - this.count = count; - return this; - } - - public Integer getTtl() { - return ttl; + public Boolean getAllowDirtyRead() { + return allowDirtyRead; } /** - * @param ttl The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically - * after the specified amount of time. This is useful to ensure garbage collection of cursors that are - * not fully fetched by clients. If not set, a server-defined value will be used. - * @return options + * Sets the header {@code x-arango-allow-dirty-read} to {@code true} to allow the Coordinator to ask any shard + * replica for the data, not only the shard leader. This may result in β€œdirty reads”. + *

+ * The header is ignored if this operation is part of a Stream Transaction + * ({@link AqlQueryOptions#streamTransactionId(String)}). The header set when creating the transaction decides + * about dirty reads for the entire transaction, not the individual read operations. + * + * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. + * @return this + * @see API + * Documentation */ - public AqlQueryOptions ttl(final Integer ttl) { - this.ttl = ttl; + public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) { + this.allowDirtyRead = allowDirtyRead; return this; } @@ -82,32 +77,28 @@ public Integer getBatchSize() { /** * @param batchSize maximum number of result documents to be transferred from the server to the client in one - * roundtrip. - * If this attribute is not set, a server-controlled default value will be used. A batchSize - * value of 0 - * is disallowed. - * @return options + * roundtrip. If this attribute is not set, a server-controlled default value will be used. + * A batchSize value of 0 is disallowed. + * @return this */ public AqlQueryOptions batchSize(final Integer batchSize) { this.batchSize = batchSize; return this; } - public Long getMemoryLimit() { - return memoryLimit; + @UserDataInside + public Map getBindVars() { + return bindVars; } /** - * @param memoryLimit the maximum number of memory (measured in bytes) that the query is allowed to use. If set, - * then the - * query will fail with error "resource limit exceeded" in case it allocates too much memory. - * A value of - * 0 indicates that there is no memory limit. - * @return options - * @since ArangoDB 3.1.0 + * @param bindVars A map with key/value pairs representing the bind parameters. For a bind variable {@code @var} in + * the query, specify the value using an attribute with the name {@code var}. For a collection bind + * variable {@code @@coll}, use {@code @coll} as the attribute name. 
+ * @return this */ - public AqlQueryOptions memoryLimit(final Long memoryLimit) { - this.memoryLimit = memoryLimit; + AqlQueryOptions bindVars(final Map bindVars) { + this.bindVars = bindVars; return this; } @@ -116,497 +107,359 @@ public Boolean getCache() { } /** - * @param cache flag to determine whether the AQL query cache shall be used. If set to false, then any query cache - * lookup will be skipped for the query. If set to true, it will lead to the query cache being checked - * for the query if the query cache mode is either on or demand. - * @return options + * @param cache flag to determine whether the AQL query results cache shall be used. If set to false, then any + * query cache lookup will be skipped for the query. If set to true, it will lead to the query cache + * being checked for the query if the query cache mode is either on or demand. + * @return this */ public AqlQueryOptions cache(final Boolean cache) { this.cache = cache; return this; } - public Boolean getFillBlockCache() { - return getOptions().fillBlockCache; + public Boolean getCount() { + return count; } /** - * @param fillBlockCache if set to true or not specified, this will make the query store - * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is - * usually the desired behavior. The option can be set to false for queries that - * are known to either read a lot of data that would thrash the block cache, or for queries - * that read data known to be outside of the hot set. By setting the option - * to false, data read by the query will not make it into the RocksDB block - * cache if - * it is not already in there, thus leaving more room for the actual hot set. - * @return options - * @since ArangoDB 3.8.1 + * @param count indicates whether the number of documents in the result set should be returned and made accessible + * via {@link ArangoCursor#getCount()}. 
Calculating the {@code count} attribute might have a + * performance impact for some queries in the future so this option is turned off by default, and + * {@code count} is only returned when requested. + * @return this */ - public AqlQueryOptions fillBlockCache(final Boolean fillBlockCache) { - getOptions().fillBlockCache = fillBlockCache; + public AqlQueryOptions count(final Boolean count) { + this.count = count; return this; } - @UserDataInside - public Map getBindVars() { - return bindVars; + public Long getMemoryLimit() { + return memoryLimit; } /** - * @param bindVarsBytes serialized bind parameters - * @return options + * @param memoryLimit the maximum number of memory (measured in bytes) that the query is allowed to use. If set, + * then the query will fail with error {@code resource limit exceeded} in case it allocates too + * much memory. A value of {@code 0} indicates that there is no memory limit. + * @return this + * @since ArangoDB 3.1.0 */ - AqlQueryOptions bindVars(final Map bindVarsBytes) { - this.bindVars = bindVarsBytes; + public AqlQueryOptions memoryLimit(final Long memoryLimit) { + this.memoryLimit = memoryLimit; return this; } - public String getQuery() { - return query; + public Options getOptions() { + if (options == null) { + options = new Options(); + } + return options; } /** - * @param query the query which you want parse - * @return options + * @param options extra options for the query + * @return this */ - public AqlQueryOptions query(final String query) { - this.query = query; + public AqlQueryOptions options(final Options options) { + this.options = options; return this; } - public Boolean getFailOnWarning() { - return getOptions().failOnWarning; - } - - /** - * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a - * warning. This - * option should be used during development to catch potential issues early. 
When the - * attribute is set to - * false, warnings will not be propagated to exceptions and will be returned with the query - * result. There - * is also a server configuration option --query.fail-on-warning for setting the default - * value for - * failOnWarning so it does not need to be set on a per-query level. - * @return options - */ - public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { - getOptions().failOnWarning = failOnWarning; - return this; + public String getQuery() { + return query; } /** - * @param timeout The query has to be executed within the given runtime or it will be killed. The value is specified - * in seconds. The default value is 0.0 (no timeout). - * @return options + * @param query the query to be executed + * @return this */ - public AqlQueryOptions maxRuntime(final Double timeout) { - getOptions().maxRuntime = timeout; + public AqlQueryOptions query(final String query) { + this.query = query; return this; } - /** - * @return If set to true, then the additional query profiling information will be returned in the sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. - */ - public Boolean getProfile() { - return getOptions().profile; + public Integer getTtl() { + return ttl; } /** - * @param profile If set to true, then the additional query profiling information will be returned in the - * sub-attribute - * profile of the extra return attribute if the query result is not served from the query cache. - * @return options + * @param ttl The time-to-live for the cursor (in seconds). If the result set is small enough (less than or equal + * to batchSize) then results are returned right away. Otherwise, they are stored in memory and will be + * accessible via the cursor with respect to the ttl. The cursor will be removed on the server + * automatically after the specified amount of time. 
This is useful to ensure garbage collection of + * cursors that are not fully fetched by clients. + *

+ * If not set, a server-defined value will be used (default: 30 seconds). + *

+ * The time-to-live is renewed upon every access to the cursor. + * @return this */ - public AqlQueryOptions profile(final Boolean profile) { - getOptions().profile = profile; + public AqlQueryOptions ttl(final Integer ttl) { + this.ttl = ttl; return this; } - public Long getMaxTransactionSize() { - return getOptions().maxTransactionSize; - } - - /** - * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. - * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { - getOptions().maxTransactionSize = maxTransactionSize; - return this; + @Override + public AqlQueryOptions clone() { + try { + AqlQueryOptions clone = (AqlQueryOptions) super.clone(); + clone.bindVars = bindVars != null ? new HashMap<>(bindVars) : null; + clone.options = options != null ? options.clone() : null; + return clone; + } catch (CloneNotSupportedException e) { + throw new AssertionError(); + } } - public Long getMaxWarningCount() { - return getOptions().maxWarningCount; - } + public static final class Options implements Cloneable { + private Map customOptions; + private Boolean allPlans; + private Boolean allowDirtyReads; + private Boolean allowRetry; + private Boolean failOnWarning; + private Boolean fillBlockCache; + private String forceOneShardAttributeValue; + private Boolean fullCount; + private Long intermediateCommitCount; + private Long intermediateCommitSize; + private Integer maxDNFConditionMembers; + private Integer maxNodesPerCallstack; + private Integer maxNumberOfPlans; + private Double maxRuntime; + private Long maxTransactionSize; + private Long maxWarningCount; + private Optimizer optimizer; + private Boolean profile; + private Double satelliteSyncWait; + private Collection shardIds; + private Boolean skipInaccessibleCollections; + private Long spillOverThresholdMemoryUsage; + private Long spillOverThresholdNumRows; + private Boolean stream; + private Boolean 
usePlanCache; - /** - * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a - * query will return - * is limited to 10 by default, but that number can be increased or decreased by setting - * this attribute. - * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions maxWarningCount(final Long maxWarningCount) { - getOptions().maxWarningCount = maxWarningCount; - return this; - } + @JsonInclude + @JsonAnyGetter + public Map getCustomOptions() { + if (customOptions == null) { + customOptions = new HashMap<>(); + } + return customOptions; + } - public Long getIntermediateCommitCount() { - return getOptions().intermediateCommitCount; - } + public void setCustomOption(String key, Object value) { + getCustomOptions().put(key, value); + } - /** - * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed - * automatically. Honored by - * the RocksDB storage engine only. - * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { - getOptions().intermediateCommitCount = intermediateCommitCount; - return this; - } + public Boolean getAllPlans() { + return allPlans; + } - public Long getIntermediateCommitSize() { - return getOptions().intermediateCommitSize; - } + public Boolean getAllowDirtyReads() { + return allowDirtyReads; + } - /** - * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed - * automatically. - * Honored by the RocksDB storage engine only. 
- * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { - getOptions().intermediateCommitSize = intermediateCommitSize; - return this; - } + public Boolean getAllowRetry() { + return allowRetry; + } - public Double getSatelliteSyncWait() { - return getOptions().satelliteSyncWait; - } + public Boolean getFailOnWarning() { + return failOnWarning; + } - /** - * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to - * bring the - * satellite collections involved in the query into sync. The default value is 60.0 - * (seconds). When the - * max time has been reached the query will be stopped. - * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { - getOptions().satelliteSyncWait = satelliteSyncWait; - return this; - } + public Boolean getFillBlockCache() { + return fillBlockCache; + } - public Boolean getSkipInaccessibleCollections() { - return getOptions().skipInaccessibleCollections; - } + public String getForceOneShardAttributeValue() { + return forceOneShardAttributeValue; + } - /** - * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a - * user has no access rights - * as if these collections were empty. Instead of returning a forbidden access - * error, your queries will - * execute normally. This is intended to help with certain use-cases: A graph - * contains several - * collections and different users execute AQL queries on that graph. You can - * now naturally limit the - * accessible results by changing the access rights of users on collections. - * This feature is only - * available in the Enterprise Edition. 
- * @return options - * @since ArangoDB 3.2.0 - */ - public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { - getOptions().skipInaccessibleCollections = skipInaccessibleCollections; - return this; - } + public Boolean getFullCount() { + return fullCount; + } - public Boolean getFullCount() { - return getOptions().fullCount; - } + public Long getIntermediateCommitCount() { + return intermediateCommitCount; + } - /** - * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra - * attribute - * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } - * } }. The - * fullCount attribute will contain the number of documents in the result before the last LIMIT - * in the - * query was applied. It can be used to count the number of documents that match certain filter - * criteria, - * but only return a subset of them, in one go. It is thus similar to MySQL's - * SQL_CALC_FOUND_ROWS hint. - * Note that setting the option will disable a few LIMIT optimizations and may lead to more - * documents - * being processed, and thus make queries run longer. Note that the fullCount attribute will - * only be - * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used - * in the - * query. - * @return options - */ - public AqlQueryOptions fullCount(final Boolean fullCount) { - getOptions().fullCount = fullCount; - return this; - } + public Long getIntermediateCommitSize() { + return intermediateCommitSize; + } - public Integer getMaxPlans() { - return getOptions().maxPlans; - } + public Integer getMaxDNFConditionMembers() { + return maxDNFConditionMembers; + } - /** - * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer. 
- * @return options - */ - public AqlQueryOptions maxPlans(final Integer maxPlans) { - getOptions().maxPlans = maxPlans; - return this; - } + public Integer getMaxNodesPerCallstack() { + return maxNodesPerCallstack; + } - public Collection getRules() { - return getOptions().getOptimizer().rules; - } + public Integer getMaxNumberOfPlans() { + return maxNumberOfPlans; + } - /** - * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, - * telling the - * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to - * enable - * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules - * @return options - */ - public AqlQueryOptions rules(final Collection rules) { - getOptions().getOptimizer().rules = rules; - return this; - } + /** + * @deprecated for removal, use {@link Options#getMaxNumberOfPlans()} instead + */ + @Deprecated + @JsonIgnore + public Integer getMaxPlans() { + return getMaxNumberOfPlans(); + } - public Boolean getStream() { - return getOptions().stream; - } + public Double getMaxRuntime() { + return maxRuntime; + } - /** - * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not - * stored on - * the server, but calculated on the fly. Beware: long-running queries will need to hold the - * collection - * locks for as long as the query cursor exists. When set to false a query will be executed right - * away in - * its entirety. In that case query results are either returned right away (if the resultset is small - * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the - * ttl). It is advisable to only use this option on short-running queries or without exclusive locks - * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not - * work - * on streaming queries. 
Additionally query statistics, warnings and profiling data will only be - * available after the query is finished. The default value is false - * @return options - * @since ArangoDB 3.4.0 - */ - public AqlQueryOptions stream(final Boolean stream) { - getOptions().stream = stream; - return this; - } + public Long getMaxTransactionSize() { + return maxTransactionSize; + } - public Collection getShardIds() { - return getOptions().shardIds; - } + public Long getMaxWarningCount() { + return maxWarningCount; + } - /** - * Restrict query to shards by given ids. This is an internal option. Use at your own risk. - * - * @param shardIds - * @return options - */ - public AqlQueryOptions shardIds(final String... shardIds) { - getOptions().shardIds = Arrays.asList(shardIds); - return this; - } + public Optimizer getOptimizer() { + if (optimizer == null) { + optimizer = new Optimizer(); + } + return optimizer; + } - public String getForceOneShardAttributeValue() { - return options != null ? options.forceOneShardAttributeValue : null; - } + public Boolean getProfile() { + return profile; + } - /** - * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer - * cannot automatically detect that the query can be limited to only a single - * server (e.g. in a disjoint smart graph case). - *

- * If the option is set incorrectly, i.e. to a wrong shard key value, then the - * query may be shipped to a wrong DB server and may not return results (i.e. - * empty result set). - *

- * Use at your own risk. - * @return options - */ - public AqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) { - getOptions().forceOneShardAttributeValue = forceOneShardAttributeValue; - return this; - } + public Double getSatelliteSyncWait() { + return satelliteSyncWait; + } - public Options getOptions() { - if (options == null) { - options = new Options(); + public Collection getShardIds() { + return shardIds; } - return options; - } - /** - * @param allowDirtyRead Set to {@code true} allows reading from followers in an active-failover setup. - * @return options - * @see API - * Documentation - * @since ArangoDB 3.4.0 - */ - public AqlQueryOptions allowDirtyRead(final Boolean allowDirtyRead) { - this.allowDirtyRead = allowDirtyRead; - return this; - } + public Boolean getSkipInaccessibleCollections() { + return skipInaccessibleCollections; + } - public Boolean getAllowDirtyRead() { - return allowDirtyRead; - } + public Long getSpillOverThresholdMemoryUsage() { + return spillOverThresholdMemoryUsage; + } - public Boolean getAllowRetry() { - return getOptions().allowRetry; - } + public Long getSpillOverThresholdNumRows() { + return spillOverThresholdNumRows; + } - /** - * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor. - *

- * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in - * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} - * with cause {@link java.io.IOException}) - *

- * If set to false (default), then it is not safe to retry invoking - * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to - * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the - * server). - *

- * Note: once you successfully received the last batch, you should call - * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessary keep the - * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}). - * @return options - * @since ArangoDB 3.11 - */ - public AqlQueryOptions allowRetry(final Boolean allowRetry) { - getOptions().allowRetry = allowRetry; - return this; - } + public Boolean getStream() { + return stream; + } - @Override - public AqlQueryOptions clone() { - try { - AqlQueryOptions clone = (AqlQueryOptions) super.clone(); - clone.bindVars = bindVars != null ? new HashMap<>(bindVars) : null; - clone.options = options != null ? options.clone() : null; - return clone; - } catch (CloneNotSupportedException e) { - throw new AssertionError(); + public Boolean getUsePlanCache() { + return usePlanCache; } - } - public static final class Options implements Cloneable { + public void setAllPlans(Boolean allPlans) { + this.allPlans = allPlans; + } - private Boolean failOnWarning; - private Boolean profile; - private Long maxTransactionSize; - private Long maxWarningCount; - private Long intermediateCommitCount; - private Long intermediateCommitSize; - private Double satelliteSyncWait; - private Boolean skipInaccessibleCollections; - private Optimizer optimizer; - private Boolean fullCount; - private Integer maxPlans; - private Boolean stream; - private Collection shardIds; - private Double maxRuntime; - private Boolean fillBlockCache; - private String forceOneShardAttributeValue; - private Boolean allowRetry; + public void setAllowDirtyReads(Boolean allowDirtyReads) { + this.allowDirtyReads = allowDirtyReads; + } - public Boolean getFailOnWarning() { - return failOnWarning; + public void setAllowRetry(Boolean allowRetry) { + this.allowRetry = allowRetry; } - public Boolean getProfile() { - return profile; + public void setFailOnWarning(Boolean failOnWarning) { + this.failOnWarning = failOnWarning; } - public Long 
getMaxTransactionSize() { - return maxTransactionSize; + public void setFillBlockCache(Boolean fillBlockCache) { + this.fillBlockCache = fillBlockCache; } - public Long getMaxWarningCount() { - return maxWarningCount; + public void setForceOneShardAttributeValue(String forceOneShardAttributeValue) { + this.forceOneShardAttributeValue = forceOneShardAttributeValue; } - public Long getIntermediateCommitCount() { - return intermediateCommitCount; + public void setFullCount(Boolean fullCount) { + this.fullCount = fullCount; } - public Long getIntermediateCommitSize() { - return intermediateCommitSize; + public void setIntermediateCommitCount(Long intermediateCommitCount) { + this.intermediateCommitCount = intermediateCommitCount; } - public Double getSatelliteSyncWait() { - return satelliteSyncWait; + public void setIntermediateCommitSize(Long intermediateCommitSize) { + this.intermediateCommitSize = intermediateCommitSize; } - public Boolean getSkipInaccessibleCollections() { - return skipInaccessibleCollections; + public void setMaxDNFConditionMembers(Integer maxDNFConditionMembers) { + this.maxDNFConditionMembers = maxDNFConditionMembers; } - public Boolean getFullCount() { - return fullCount; + public void setMaxNodesPerCallstack(Integer maxNodesPerCallstack) { + this.maxNodesPerCallstack = maxNodesPerCallstack; } - public Integer getMaxPlans() { - return maxPlans; + public void setMaxNumberOfPlans(Integer maxNumberOfPlans) { + this.maxNumberOfPlans = maxNumberOfPlans; } - public Boolean getStream() { - return stream; + public void setMaxRuntime(Double maxRuntime) { + this.maxRuntime = maxRuntime; } - public Double getMaxRuntime() { - return maxRuntime; + public void setMaxTransactionSize(Long maxTransactionSize) { + this.maxTransactionSize = maxTransactionSize; } - public Boolean getFillBlockCache() { - return fillBlockCache; + public void setMaxWarningCount(Long maxWarningCount) { + this.maxWarningCount = maxWarningCount; } - public String 
getForceOneShardAttributeValue() { - return forceOneShardAttributeValue; + public void setOptimizer(Optimizer optimizer) { + this.optimizer = optimizer; } - public Optimizer getOptimizer() { - if (optimizer == null) { - optimizer = new Optimizer(); - } - return optimizer; + public void setProfile(Boolean profile) { + this.profile = profile; } - public Collection getShardIds() { - return shardIds; + public void setSatelliteSyncWait(Double satelliteSyncWait) { + this.satelliteSyncWait = satelliteSyncWait; } - public Boolean getAllowRetry() { - return allowRetry; + public void setShardIds(Collection shardIds) { + this.shardIds = shardIds; + } + + public void setSkipInaccessibleCollections(Boolean skipInaccessibleCollections) { + this.skipInaccessibleCollections = skipInaccessibleCollections; + } + + public void setSpillOverThresholdMemoryUsage(Long spillOverThresholdMemoryUsage) { + this.spillOverThresholdMemoryUsage = spillOverThresholdMemoryUsage; + } + + public void setSpillOverThresholdNumRows(Long spillOverThresholdNumRows) { + this.spillOverThresholdNumRows = spillOverThresholdNumRows; + } + + public void setStream(Boolean stream) { + this.stream = stream; + } + + public void setUsePlanCache(Boolean usePlanCache) { + this.usePlanCache = usePlanCache; } @Override public Options clone() { try { Options clone = (Options) super.clone(); + clone.customOptions = customOptions != null ? new HashMap<>(customOptions) : null; clone.optimizer = optimizer != null ? optimizer.clone() : null; clone.shardIds = shardIds != null ? 
new ArrayList<>(shardIds) : null; return clone; @@ -623,6 +476,10 @@ public Collection getRules() { return rules; } + public void setRules(Collection rules) { + this.rules = rules; + } + @Override public Optimizer clone() { try { @@ -635,4 +492,541 @@ public Optimizer clone() { } } + // ------------------------------------ + // --- accessors for nested options --- + // ------------------------------------ + + @JsonIgnore + public Map getCustomOptions() { + return getOptions().getCustomOptions(); + } + + /** + * Set an additional custom option in the form of key-value pair. + * + * @param key option name + * @param value option value + * @return this + */ + public AqlQueryOptions customOption(String key, Object value) { + getOptions().setCustomOption(key, value); + return this; + } + + @JsonIgnore + public Boolean getAllowDirtyReads() { + return getOptions().getAllowDirtyReads(); + } + + /** + * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then + * the Coordinator is allowed to read from any shard replica and not only from the leader. + * You may observe data inconsistencies (dirty reads) when reading from followers, namely + * obsolete revisions of documents because changes have not yet been replicated to the + * follower, as well as changes to documents before they are officially committed on the + * leader. This feature is only available in the Enterprise Edition. + * @return this + */ + public AqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) { + getOptions().setAllowDirtyReads(allowDirtyReads); + return this; + } + + @JsonIgnore + public Boolean getAllowRetry() { + return getOptions().getAllowRetry(); + } + + /** + * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor. + *

+ * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in + * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} + * with cause {@link java.io.IOException}) + *

+ * If set to false (default), then it is not safe to retry invoking + * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to + * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the + * server). + *

+ * Note: once you successfully received the last batch, you should call + * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessary keep the + * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}). + * @return this + * @since ArangoDB 3.11 + */ + public AqlQueryOptions allowRetry(final Boolean allowRetry) { + getOptions().setAllowRetry(allowRetry); + return this; + } + + @JsonIgnore + public Boolean getFailOnWarning() { + return getOptions().getFailOnWarning(); + } + + /** + * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a + * warning. This option should be used during development to catch potential issues early. + * When the attribute is set to false, warnings will not be propagated to exceptions and will + * be returned with the query result. There is also a server configuration option + * --query.fail-on-warning for setting the default value for failOnWarning so it does not + * need to be set on a per-query level. + * @return this + */ + public AqlQueryOptions failOnWarning(final Boolean failOnWarning) { + getOptions().setFailOnWarning(failOnWarning); + return this; + } + + @JsonIgnore + public Boolean getFillBlockCache() { + return getOptions().getFillBlockCache(); + } + + /** + * @param fillBlockCache if set to true or not specified, this will make the query store + * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is + * usually the desired behavior. The option can be set to false for queries that + * are known to either read a lot of data that would thrash the block cache, or for queries + * that read data known to be outside of the hot set. By setting the option + * to false, data read by the query will not make it into the RocksDB block + * cache if it is not already in there, thus leaving more room for the actual hot set. 
+ * @return this + * @since ArangoDB 3.8.1 + */ + public AqlQueryOptions fillBlockCache(final Boolean fillBlockCache) { + getOptions().setFillBlockCache(fillBlockCache); + return this; + } + + @JsonIgnore + public String getForceOneShardAttributeValue() { + return getOptions().getForceOneShardAttributeValue(); + } + + /** + * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer + * cannot automatically detect that the query can be limited to only a single + * server (e.g. in a disjoint smart graph case). + *

+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the + * query may be shipped to a wrong DB server and may not return results (i.e. + * empty result set). + *

+ * Use at your own risk. + * @return this + */ + public AqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) { + getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue); + return this; + } + + @JsonIgnore + public Boolean getFullCount() { + return getOptions().getFullCount(); + } + + /** + * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra + * attribute + * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } + * } }. The + * fullCount attribute will contain the number of documents in the result before the last LIMIT + * in the + * query was applied. It can be used to count the number of documents that match certain filter + * criteria, + * but only return a subset of them, in one go. It is thus similar to MySQL's + * SQL_CALC_FOUND_ROWS hint. + * Note that setting the option will disable a few LIMIT optimizations and may lead to more + * documents + * being processed, and thus make queries run longer. Note that the fullCount attribute will + * only be + * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used + * in the + * query. + * @return this + */ + public AqlQueryOptions fullCount(final Boolean fullCount) { + getOptions().setFullCount(fullCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitCount() { + return getOptions().getIntermediateCommitCount(); + } + + /** + * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed + * automatically. Honored by + * the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { + getOptions().setIntermediateCommitCount(intermediateCommitCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitSize() { + return getOptions().getIntermediateCommitSize(); + } + + /** + * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed + * automatically. + * Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { + getOptions().setIntermediateCommitSize(intermediateCommitSize); + return this; + } + + @JsonIgnore + public Integer getMaxDNFConditionMembers() { + return getOptions().getMaxDNFConditionMembers(); + } + + /** + * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation + * of an AQL FILTER condition. + *

+ * Yon can use this option to limit the computation time and memory usage when + * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal + * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can + * take a large amount of processing time and memory. This query option limits the + * computation time and memory usage for such conditions. + *

+ * Once the threshold value is reached during the DNF conversion of a FILTER + * condition, the conversion is aborted, and the query continues with a simplified + * internal representation of the condition, which cannot be used for index lookups. + *

+ * You can set the threshold globally instead of per query with the + * --query.max-dnf-condition-members startup option. + * @return this + */ + public AqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) { + getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers); + return this; + } + + @JsonIgnore + public Integer getMaxNodesPerCallstack() { + return getOptions().getMaxNodesPerCallstack(); + } + + /** + * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is + * performed to avoid a potential stack overflow. Defaults to the configured value of + * the startup option --query.max-nodes-per-callstack. + *

+ * This option is only useful for testing and debugging and normally does not need any + * adjustment. + * @return this + */ + public AqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) { + getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack); + return this; + } + + @JsonIgnore + public Integer getMaxNumberOfPlans() { + return getOptions().getMaxNumberOfPlans(); + } + + /** + * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + */ + public AqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { + getOptions().setMaxNumberOfPlans(maxNumberOfPlans); + return this; + } + + /** + * @deprecated for removal, use {@link AqlQueryOptions#getMaxNumberOfPlans()} instead + */ + @Deprecated + @JsonIgnore + public Integer getMaxPlans() { + return getMaxNumberOfPlans(); + } + + /** + * @param maxPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + * @deprecated for removal, use {@link AqlQueryOptions#maxNumberOfPlans(Integer)} instead + */ + @Deprecated + public AqlQueryOptions maxPlans(final Integer maxPlans) { + return maxNumberOfPlans(maxPlans); + } + + @JsonIgnore + public Double getMaxRuntime() { + return getOptions().getMaxRuntime(); + } + + /** + * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified + * in seconds. The default value is 0.0 (no timeout). + * @return this + */ + public AqlQueryOptions maxRuntime(final Double maxRuntime) { + getOptions().setMaxRuntime(maxRuntime); + return this; + } + + @JsonIgnore + public Long getMaxTransactionSize() { + return getOptions().getMaxTransactionSize(); + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { + getOptions().setMaxTransactionSize(maxTransactionSize); + return this; + } + + @JsonIgnore + public Long getMaxWarningCount() { + return getOptions().getMaxWarningCount(); + } + + /** + * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a + * query will return + * is limited to 10 by default, but that number can be increased or decreased by setting + * this attribute. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions maxWarningCount(final Long maxWarningCount) { + getOptions().setMaxWarningCount(maxWarningCount); + return this; + } + + @JsonIgnore + public Optimizer getOptimizer() { + return getOptions().getOptimizer(); + } + + /** + * @param optimizer Options related to the query optimizer. + * @return this + */ + public AqlQueryOptions optimizer(final Optimizer optimizer) { + getOptions().setOptimizer(optimizer); + return this; + } + + @JsonIgnore + public Boolean getProfile() { + return getOptions().getProfile(); + } + + /** + * @param profile If set to true, then the additional query profiling information will be returned in the + * sub-attribute + * profile of the extra return attribute if the query result is not served from the query cache. + * @return this + */ + public AqlQueryOptions profile(final Boolean profile) { + getOptions().setProfile(profile); + return this; + } + + @JsonIgnore + public Double getSatelliteSyncWait() { + return getOptions().getSatelliteSyncWait(); + } + + /** + * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to + * bring the + * satellite collections involved in the query into sync. The default value is 60.0 + * (seconds). When the + * max time has been reached the query will be stopped. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { + getOptions().setSatelliteSyncWait(satelliteSyncWait); + return this; + } + + @JsonIgnore + public Collection getShardIds() { + return getOptions().getShardIds(); + } + + /** + * Restrict query to shards by given ids. This is an internal option. Use at your own risk. + * + * @param shardIds + * @return this + */ + public AqlQueryOptions shardIds(final String... shardIds) { + getOptions().setShardIds(Arrays.asList(shardIds)); + return this; + } + + @JsonIgnore + public Boolean getSkipInaccessibleCollections() { + return getOptions().getSkipInaccessibleCollections(); + } + + /** + * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a + * user has no access rights + * as if these collections were empty. Instead of returning a forbidden access + * error, your queries will + * execute normally. This is intended to help with certain use-cases: A graph + * contains several + * collections and different users execute AQL queries on that graph. You can + * now naturally limit the + * accessible results by changing the access rights of users on collections. + * This feature is only + * available in the Enterprise Edition. + * @return this + * @since ArangoDB 3.2.0 + */ + public AqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { + getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdMemoryUsage() { + return getOptions().getSpillOverThresholdMemoryUsage(); + } + + /** + * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results + * temporarily on disk if the amount of memory used (in bytes) exceeds the + * specified value. This is used for decreasing the memory usage during the + * query execution. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path + * for the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 128MB. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned + * off by default. The query results are still built up entirely in RAM on + * Coordinators and single servers for non-streaming queries. To avoid the + * buildup of the entire query result in RAM, use a streaming query (see the + * stream option). + * @return this + */ + public AqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) { + getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdNumRows() { + return getOptions().getSpillOverThresholdNumRows(); + } + + /** + * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily + * on disk if the number of rows produced by the query exceeds the specified value. + * This is used for decreasing the memory usage during the query execution. In a + * query that iterates over a collection that contains documents, each row is a + * document, and in a query that iterates over temporary values + * (i.e. FOR i IN 1..100), each row is one of such temporary values. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path for + * the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 5000000 rows. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned off + * by default. The query results are still built up entirely in RAM on Coordinators + * and single servers for non-streaming queries. To avoid the buildup of the entire + * query result in RAM, use a streaming query (see the stream option). + * @return this + */ + public AqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) { + getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows); + return this; + } + + @JsonIgnore + public Boolean getStream() { + return getOptions().getStream(); + } + + @JsonIgnore + public Boolean getUsePlanCache() { + return getOptions().getUsePlanCache(); + } + + /** + * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not + * stored on + * the server, but calculated on the fly. Beware: long-running queries will need to hold the + * collection + * locks for as long as the query cursor exists. When set to false a query will be executed right + * away in + * its entirety. In that case query results are either returned right away (if the resultset is small + * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the + * ttl). It is advisable to only use this option on short-running queries or without exclusive locks + * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not + * work + * on streaming queries. Additionally query statistics, warnings and profiling data will only be + * available after the query is finished. The default value is false + * @return this + * @since ArangoDB 3.4.0 + */ + public AqlQueryOptions stream(final Boolean stream) { + getOptions().setStream(stream); + return this; + } + + /** + * @param usePlanCache Set this option to true to utilize a cached query plan or add the execution plan of this + * query to the cache if it’s not in the cache yet. 
Otherwise, the plan cache is bypassed + * (introduced in v3.12.4). + * Query plan caching can reduce the total time for processing queries by avoiding to parse, + * plan, and optimize queries over and over again that effectively have the same execution plan + * with at most some changes to bind parameter values. + * @return this + */ + public AqlQueryOptions usePlanCache(final Boolean usePlanCache) { + getOptions().setUsePlanCache(usePlanCache); + return this; + } + + @JsonIgnore + public Collection getRules() { + return getOptions().getOptimizer().getRules(); + } + + /** + * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, + * telling the + * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to + * enable + * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules + * @return this + */ + public AqlQueryOptions rules(final Collection rules) { + getOptions().getOptimizer().setRules(rules); + return this; + } + } diff --git a/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java b/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java index 519e81c28..691b4344d 100644 --- a/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java +++ b/core/src/main/java/com/arangodb/model/CollectionPropertiesOptions.java @@ -20,6 +20,9 @@ package com.arangodb.model; +import com.arangodb.entity.ReplicationFactor; +import com.fasterxml.jackson.annotation.JsonInclude; + import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -29,35 +32,83 @@ */ public final class CollectionPropertiesOptions { - private Boolean waitForSync; - private CollectionSchema schema; + private Boolean cacheEnabled; private List computedValues; + private ReplicationFactor replicationFactor; + private CollectionSchema schema; + private Boolean waitForSync; + private Integer writeConcern; public 
CollectionPropertiesOptions() { super(); } - public Boolean getWaitForSync() { - return waitForSync; + public Boolean getCacheEnabled() { + return cacheEnabled; } /** - * @param waitForSync If true then creating or changing a document will wait until the data has been synchronized - * to disk. - * @return options + * @param cacheEnabled Whether the in-memory hash cache for documents should be enabled for this collection. Can be + * controlled globally with the --cache.size startup option. The cache can speed up repeated + * reads of the same documents via their document keys. If the same documents are not fetched + * often or are modified frequently, then you may disable the cache to avoid the maintenance + * costs. + * @return this */ - public CollectionPropertiesOptions waitForSync(final Boolean waitForSync) { - this.waitForSync = waitForSync; + public CollectionPropertiesOptions cacheEnabled(final Boolean cacheEnabled) { + this.cacheEnabled = cacheEnabled; + return this; + } + + public List getComputedValues() { + return computedValues; + } + + /** + * @param computedValues An optional list of computed values. + * @return this + * @since ArangoDB 3.10 + */ + public CollectionPropertiesOptions computedValues(final ComputedValue... computedValues) { + if (this.computedValues == null) { + this.computedValues = new ArrayList<>(); + } + Collections.addAll(this.computedValues, computedValues); return this; } + public ReplicationFactor getReplicationFactor() { + return replicationFactor; + } + + /** + * @param replicationFactor In a cluster, this attribute determines how many copies of each shard are kept on + * different DB-Servers. The value 1 means that only one copy (no synchronous replication) + * is kept. A value of k means that k-1 replicas are kept. For SatelliteCollections, it + * needs to be the string "satellite", which matches the replication factor to the number + * of DB-Servers (Enterprise Edition only). + *

+ * Any two copies reside on different DB-Servers. Replication between them is synchronous, + * that is, every write operation to the β€œleader” copy will be replicated to all β€œfollower” + * replicas, before the write operation is reported successful. + *

+ * If a server fails, this is detected automatically and one of the servers holding copies + * take over, usually without an error being reported. + * @return this + */ + public CollectionPropertiesOptions replicationFactor(final ReplicationFactor replicationFactor) { + this.replicationFactor = replicationFactor; + return this; + } + + @JsonInclude(JsonInclude.Include.ALWAYS) public CollectionSchema getSchema() { return schema; } /** * @param schema object that specifies the collection level schema for documents - * @return options + * @return this * @since ArangoDB 3.7 */ public CollectionPropertiesOptions schema(final CollectionSchema schema) { @@ -65,20 +116,40 @@ public CollectionPropertiesOptions schema(final CollectionSchema schema) { return this; } + public Boolean getWaitForSync() { + return waitForSync; + } + /** - * @param computedValues An optional list of computed values. - * @return options - * @since ArangoDB 3.10 + * @param waitForSync If true then creating or changing a document will wait until the data has been synchronized + * to disk. + * @return this */ - public CollectionPropertiesOptions computedValues(final ComputedValue... computedValues) { - if(this.computedValues == null) { - this.computedValues = new ArrayList<>(); - } - Collections.addAll(this.computedValues, computedValues); + public CollectionPropertiesOptions waitForSync(final Boolean waitForSync) { + this.waitForSync = waitForSync; return this; } - public List getComputedValues() { - return computedValues; + public Integer getWriteConcern() { + return writeConcern; } + + /** + * @param writeConcern Determines how many copies of each shard are required to be in sync on the different + * DB-Servers. If there are less than these many copies in the cluster, a shard refuses to + * write. Writes to shards with enough up-to-date copies succeed at the same time, however. + * The value of writeConcern cannot be greater than replicationFactor. + *

+ * If distributeShardsLike is set, the default writeConcern is that of the prototype collection. + * For SatelliteCollections, the writeConcern is automatically controlled to equal the number of + * DB-Servers and has a value of 0. Otherwise, the default value is controlled by the current + * database’s default writeConcern, which uses the --cluster.write-concern startup option as + * default, which defaults to 1. (cluster only) + * @return this + */ + public CollectionPropertiesOptions writeConcern(final Integer writeConcern) { + this.writeConcern = writeConcern; + return this; + } + } diff --git a/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java new file mode 100644 index 000000000..827670cf5 --- /dev/null +++ b/core/src/main/java/com/arangodb/model/ExplainAqlQueryOptions.java @@ -0,0 +1,616 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb.model; + +import com.arangodb.internal.serde.UserDataInside; +import com.fasterxml.jackson.annotation.JsonIgnore; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +/** + * @author Michele Rastelli + */ +public final class ExplainAqlQueryOptions { + + private Map bindVars; + private String query; + private AqlQueryOptions.Options options; + + public ExplainAqlQueryOptions() { + super(); + } + + @UserDataInside + public Map getBindVars() { + return bindVars; + } + + /** + * @param bindVars key/value pairs representing the bind parameters + * @return options + */ + ExplainAqlQueryOptions bindVars(final Map bindVars) { + this.bindVars = bindVars; + return this; + } + + public String getQuery() { + return query; + } + + /** + * @param query the query which you want explained + * @return options + */ + ExplainAqlQueryOptions query(final String query) { + this.query = query; + return this; + } + + public AqlQueryOptions.Options getOptions() { + if (options == null) { + options = new AqlQueryOptions.Options(); + } + return options; + } + + public ExplainAqlQueryOptions options(final AqlQueryOptions.Options options) { + this.options = options; + return this; + } + + + // ------------------------------------ + // --- accessors for nested options --- + // ------------------------------------ + + @JsonIgnore + public Map getCustomOptions() { + return getOptions().getCustomOptions(); + } + + /** + * Set an additional custom option in the form of key-value pair. + * + * @param key option name + * @param value option value + * @return this + */ + public ExplainAqlQueryOptions customOption(String key, Object value) { + getOptions().setCustomOption(key, value); + return this; + } + + @JsonIgnore + public Boolean getAllPlans() { + return getOptions().getAllPlans(); + } + + /** + * @param value if set to true, all possible execution plans will be returned. 
The default is false, meaning only + * the optimal plan will be returned. + * @return this + */ + public ExplainAqlQueryOptions allPlans(final Boolean value) { + getOptions().setAllPlans(value); + return this; + } + + @JsonIgnore + public Boolean getAllowDirtyReads() { + return getOptions().getAllowDirtyReads(); + } + + /** + * @param allowDirtyReads If you set this option to true and execute the query against a cluster deployment, then + * the Coordinator is allowed to read from any shard replica and not only from the leader. + * You may observe data inconsistencies (dirty reads) when reading from followers, namely + * obsolete revisions of documents because changes have not yet been replicated to the + * follower, as well as changes to documents before they are officially committed on the + * leader. This feature is only available in the Enterprise Edition. + * @return this + */ + public ExplainAqlQueryOptions allowDirtyReads(final Boolean allowDirtyReads) { + getOptions().setAllowDirtyReads(allowDirtyReads); + return this; + } + + @JsonIgnore + public Boolean getAllowRetry() { + return getOptions().getAllowRetry(); + } + + /** + * @param allowRetry Set this option to true to make it possible to retry fetching the latest batch from a cursor. + *

+ * This makes possible to safely retry invoking {@link com.arangodb.ArangoCursor#next()} in + * case of I/O exceptions (which are actually thrown as {@link com.arangodb.ArangoDBException} + * with cause {@link java.io.IOException}) + *

+ * If set to false (default), then it is not safe to retry invoking + * {@link com.arangodb.ArangoCursor#next()} in case of I/O exceptions, since the request to + * fetch the next batch is not idempotent (i.e. the cursor may advance multiple times on the + * server). + *

+ * Note: once you successfully received the last batch, you should call + * {@link com.arangodb.ArangoCursor#close()} so that the server does not unnecessary keep the + * batch until the cursor times out ({@link AqlQueryOptions#ttl(Integer)}). + * @return this + * @since ArangoDB 3.11 + */ + public ExplainAqlQueryOptions allowRetry(final Boolean allowRetry) { + getOptions().setAllowRetry(allowRetry); + return this; + } + + @JsonIgnore + public Boolean getFailOnWarning() { + return getOptions().getFailOnWarning(); + } + + /** + * @param failOnWarning When set to true, the query will throw an exception and abort instead of producing a + * warning. This option should be used during development to catch potential issues early. + * When the attribute is set to false, warnings will not be propagated to exceptions and will + * be returned with the query result. There is also a server configuration option + * --query.fail-on-warning for setting the default value for failOnWarning so it does not + * need to be set on a per-query level. + * @return this + */ + public ExplainAqlQueryOptions failOnWarning(final Boolean failOnWarning) { + getOptions().setFailOnWarning(failOnWarning); + return this; + } + + @JsonIgnore + public Boolean getFillBlockCache() { + return getOptions().getFillBlockCache(); + } + + /** + * @param fillBlockCache if set to true or not specified, this will make the query store + * the data it reads via the RocksDB storage engine in the RocksDB block cache. This is + * usually the desired behavior. The option can be set to false for queries that + * are known to either read a lot of data that would thrash the block cache, or for queries + * that read data known to be outside of the hot set. By setting the option + * to false, data read by the query will not make it into the RocksDB block + * cache if it is not already in there, thus leaving more room for the actual hot set. 
+ * @return this + * @since ArangoDB 3.8.1 + */ + public ExplainAqlQueryOptions fillBlockCache(final Boolean fillBlockCache) { + getOptions().setFillBlockCache(fillBlockCache); + return this; + } + + @JsonIgnore + public String getForceOneShardAttributeValue() { + return getOptions().getForceOneShardAttributeValue(); + } + + /** + * @param forceOneShardAttributeValue This query option can be used in complex queries in case the query optimizer + * cannot automatically detect that the query can be limited to only a single + * server (e.g. in a disjoint smart graph case). + *

+ * If the option is set incorrectly, i.e. to a wrong shard key value, then the + * query may be shipped to a wrong DB server and may not return results (i.e. + * empty result set). + *

+ * Use at your own risk. + * @return this + */ + public ExplainAqlQueryOptions forceOneShardAttributeValue(final String forceOneShardAttributeValue) { + getOptions().setForceOneShardAttributeValue(forceOneShardAttributeValue); + return this; + } + + @JsonIgnore + public Boolean getFullCount() { + return getOptions().getFullCount(); + } + + /** + * @param fullCount if set to true and the query contains a LIMIT clause, then the result will have an extra + * attribute + * with the sub-attributes stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } + * } }. The + * fullCount attribute will contain the number of documents in the result before the last LIMIT + * in the + * query was applied. It can be used to count the number of documents that match certain filter + * criteria, + * but only return a subset of them, in one go. It is thus similar to MySQL's + * SQL_CALC_FOUND_ROWS hint. + * Note that setting the option will disable a few LIMIT optimizations and may lead to more + * documents + * being processed, and thus make queries run longer. Note that the fullCount attribute will + * only be + * present in the result if the query has a LIMIT clause and the LIMIT clause is actually used + * in the + * query. + * @return this + */ + public ExplainAqlQueryOptions fullCount(final Boolean fullCount) { + getOptions().setFullCount(fullCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitCount() { + return getOptions().getIntermediateCommitCount(); + } + + /** + * @param intermediateCommitCount Maximum number of operations after which an intermediate commit is performed + * automatically. Honored by + * the RocksDB storage engine only. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions intermediateCommitCount(final Long intermediateCommitCount) { + getOptions().setIntermediateCommitCount(intermediateCommitCount); + return this; + } + + @JsonIgnore + public Long getIntermediateCommitSize() { + return getOptions().getIntermediateCommitSize(); + } + + /** + * @param intermediateCommitSize Maximum total size of operations after which an intermediate commit is performed + * automatically. + * Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions intermediateCommitSize(final Long intermediateCommitSize) { + getOptions().setIntermediateCommitSize(intermediateCommitSize); + return this; + } + + @JsonIgnore + public Integer getMaxDNFConditionMembers() { + return getOptions().getMaxDNFConditionMembers(); + } + + /** + * @param maxDNFConditionMembers A threshold for the maximum number of OR sub-nodes in the internal representation + * of an AQL FILTER condition. + *

+ * Yon can use this option to limit the computation time and memory usage when + * converting complex AQL FILTER conditions into the internal DNF (disjunctive normal + * form) format. FILTER conditions with a lot of logical branches (AND, OR, NOT) can + * take a large amount of processing time and memory. This query option limits the + * computation time and memory usage for such conditions. + *

+ * Once the threshold value is reached during the DNF conversion of a FILTER + * condition, the conversion is aborted, and the query continues with a simplified + * internal representation of the condition, which cannot be used for index lookups. + *

+ * You can set the threshold globally instead of per query with the + * --query.max-dnf-condition-members startup option. + * @return this + */ + public ExplainAqlQueryOptions maxDNFConditionMembers(final Integer maxDNFConditionMembers) { + getOptions().setMaxDNFConditionMembers(maxDNFConditionMembers); + return this; + } + + @JsonIgnore + public Integer getMaxNodesPerCallstack() { + return getOptions().getMaxNodesPerCallstack(); + } + + /** + * @param maxNodesPerCallstack The number of execution nodes in the query plan after that stack splitting is + * performed to avoid a potential stack overflow. Defaults to the configured value of + * the startup option --query.max-nodes-per-callstack. + *

+ * This option is only useful for testing and debugging and normally does not need any + * adjustment. + * @return this + */ + public ExplainAqlQueryOptions maxNodesPerCallstack(final Integer maxNodesPerCallstack) { + getOptions().setMaxNodesPerCallstack(maxNodesPerCallstack); + return this; + } + + @JsonIgnore + public Integer getMaxNumberOfPlans() { + return getOptions().getMaxNumberOfPlans(); + } + + /** + * @param maxNumberOfPlans Limits the maximum number of plans that are created by the AQL query optimizer. + * @return this + */ + public ExplainAqlQueryOptions maxNumberOfPlans(final Integer maxNumberOfPlans) { + getOptions().setMaxNumberOfPlans(maxNumberOfPlans); + return this; + } + + @JsonIgnore + public Double getMaxRuntime() { + return getOptions().getMaxRuntime(); + } + + /** + * @param maxRuntime The query has to be executed within the given runtime or it will be killed. The value is specified + * in seconds. The default value is 0.0 (no timeout). + * @return this + */ + public ExplainAqlQueryOptions maxRuntime(final Double maxRuntime) { + getOptions().setMaxRuntime(maxRuntime); + return this; + } + + @JsonIgnore + public Long getMaxTransactionSize() { + return getOptions().getMaxTransactionSize(); + } + + /** + * @param maxTransactionSize Transaction size limit in bytes. Honored by the RocksDB storage engine only. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions maxTransactionSize(final Long maxTransactionSize) { + getOptions().setMaxTransactionSize(maxTransactionSize); + return this; + } + + @JsonIgnore + public Long getMaxWarningCount() { + return getOptions().getMaxWarningCount(); + } + + /** + * @param maxWarningCount Limits the maximum number of warnings a query will return. The number of warnings a + * query will return + * is limited to 10 by default, but that number can be increased or decreased by setting + * this attribute. 
+ * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions maxWarningCount(final Long maxWarningCount) { + getOptions().setMaxWarningCount(maxWarningCount); + return this; + } + + @JsonIgnore + public AqlQueryOptions.Optimizer getOptimizer() { + return getOptions().getOptimizer(); + } + + /** + * @param optimizer Options related to the query optimizer. + * @return this + */ + public ExplainAqlQueryOptions optimizer(final AqlQueryOptions.Optimizer optimizer) { + getOptions().setOptimizer(optimizer); + return this; + } + + @JsonIgnore + public Boolean getProfile() { + return getOptions().getProfile(); + } + + /** + * @param profile If set to true, then the additional query profiling information will be returned in the + * sub-attribute + * profile of the extra return attribute if the query result is not served from the query cache. + * @return this + */ + public ExplainAqlQueryOptions profile(final Boolean profile) { + getOptions().setProfile(profile); + return this; + } + + @JsonIgnore + public Double getSatelliteSyncWait() { + return getOptions().getSatelliteSyncWait(); + } + + /** + * @param satelliteSyncWait This enterprise parameter allows to configure how long a DBServer will have time to + * bring the + * satellite collections involved in the query into sync. The default value is 60.0 + * (seconds). When the + * max time has been reached the query will be stopped. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions satelliteSyncWait(final Double satelliteSyncWait) { + getOptions().setSatelliteSyncWait(satelliteSyncWait); + return this; + } + + @JsonIgnore + public Collection getShardIds() { + return getOptions().getShardIds(); + } + + /** + * Restrict query to shards by given ids. This is an internal option. Use at your own risk. + * + * @param shardIds + * @return this + */ + public ExplainAqlQueryOptions shardIds(final String... 
shardIds) { + getOptions().setShardIds(Arrays.asList(shardIds)); + return this; + } + + @JsonIgnore + public Boolean getSkipInaccessibleCollections() { + return getOptions().getSkipInaccessibleCollections(); + } + + /** + * @param skipInaccessibleCollections AQL queries (especially graph traversals) will treat collection to which a + * user has no access rights + * as if these collections were empty. Instead of returning a forbidden access + * error, your queries will + * execute normally. This is intended to help with certain use-cases: A graph + * contains several + * collections and different users execute AQL queries on that graph. You can + * now naturally limit the + * accessible results by changing the access rights of users on collections. + * This feature is only + * available in the Enterprise Edition. + * @return this + * @since ArangoDB 3.2.0 + */ + public ExplainAqlQueryOptions skipInaccessibleCollections(final Boolean skipInaccessibleCollections) { + getOptions().setSkipInaccessibleCollections(skipInaccessibleCollections); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdMemoryUsage() { + return getOptions().getSpillOverThresholdMemoryUsage(); + } + + /** + * @param spillOverThresholdMemoryUsage This option allows queries to store intermediate and final results + * temporarily on disk if the amount of memory used (in bytes) exceeds the + * specified value. This is used for decreasing the memory usage during the + * query execution. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path + * for the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 128MB. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned + * off by default. The query results are still built up entirely in RAM on + * Coordinators and single servers for non-streaming queries. To avoid the + * buildup of the entire query result in RAM, use a streaming query (see the + * stream option). + * @return this + */ + public ExplainAqlQueryOptions spillOverThresholdMemoryUsage(final Long spillOverThresholdMemoryUsage) { + getOptions().setSpillOverThresholdMemoryUsage(spillOverThresholdMemoryUsage); + return this; + } + + @JsonIgnore + public Long getSpillOverThresholdNumRows() { + return getOptions().getSpillOverThresholdNumRows(); + } + + /** + * @param spillOverThresholdNumRows This option allows queries to store intermediate and final results temporarily + * on disk if the number of rows produced by the query exceeds the specified value. + * This is used for decreasing the memory usage during the query execution. In a + * query that iterates over a collection that contains documents, each row is a + * document, and in a query that iterates over temporary values + * (i.e. FOR i IN 1..100), each row is one of such temporary values. + *

+ * This option only has an effect on queries that use the SORT operation but + * without a LIMIT, and if you enable the spillover feature by setting a path for + * the directory to store the temporary data in with the + * --temp.intermediate-results-path startup option. + *

+ * Default value: 5000000 rows. + *

+ * Spilling data from RAM onto disk is an experimental feature and is turned off + * by default. The query results are still built up entirely in RAM on Coordinators + * and single servers for non-streaming queries. To avoid the buildup of the entire + * query result in RAM, use a streaming query (see the stream option). + * @return this + */ + public ExplainAqlQueryOptions spillOverThresholdNumRows(final Long spillOverThresholdNumRows) { + getOptions().setSpillOverThresholdNumRows(spillOverThresholdNumRows); + return this; + } + + @JsonIgnore + public Boolean getStream() { + return getOptions().getStream(); + } + + @JsonIgnore + public Boolean getUsePlanCache() { + return getOptions().getUsePlanCache(); + } + + /** + * @param stream Specify true and the query will be executed in a streaming fashion. The query result is not + * stored on + * the server, but calculated on the fly. Beware: long-running queries will need to hold the + * collection + * locks for as long as the query cursor exists. When set to false a query will be executed right + * away in + * its entirety. In that case query results are either returned right away (if the resultset is small + * enough), or stored on the arangod instance and accessible via the cursor API (with respect to the + * ttl). It is advisable to only use this option on short-running queries or without exclusive locks + * (write-locks on MMFiles). Please note that the query options cache, count and fullCount will not + * work + * on streaming queries. Additionally query statistics, warnings and profiling data will only be + * available after the query is finished. The default value is false + * @return this + * @since ArangoDB 3.4.0 + */ + public ExplainAqlQueryOptions stream(final Boolean stream) { + getOptions().setStream(stream); + return this; + } + + /** + * @param usePlanCache Set this option to true to utilize a cached query plan or add the execution plan of this + * query to the cache if it’s not in the cache yet. 
Otherwise, the plan cache is bypassed + * (introduced in v3.12.4). + * Query plan caching can reduce the total time for processing queries by avoiding to parse, + * plan, and optimize queries over and over again that effectively have the same execution plan + * with at most some changes to bind parameter values. + * @return this + */ + public ExplainAqlQueryOptions usePlanCache(final Boolean usePlanCache) { + getOptions().setUsePlanCache(usePlanCache); + return this; + } + + @JsonIgnore + public Collection getRules() { + return getOptions().getOptimizer().getRules(); + } + + /** + * @param rules A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, + * telling the + * optimizer to include or exclude specific rules. To disable a rule, prefix its name with a -, to + * enable + * a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules + * @return this + */ + public ExplainAqlQueryOptions rules(final Collection rules) { + getOptions().getOptimizer().setRules(rules); + return this; + } + +} diff --git a/core/src/main/java/com/arangodb/model/OptionsBuilder.java b/core/src/main/java/com/arangodb/model/OptionsBuilder.java index 11ea5b0ab..1c6d4dfc6 100644 --- a/core/src/main/java/com/arangodb/model/OptionsBuilder.java +++ b/core/src/main/java/com/arangodb/model/OptionsBuilder.java @@ -71,14 +71,21 @@ public static CollectionCreateOptions build(final CollectionCreateOptions option } public static AqlQueryOptions build(final AqlQueryOptions options, final String query, - final Map bindVars) { + final Map bindVars) { return options.query(query).bindVars(bindVars); } public static AqlQueryExplainOptions build( final AqlQueryExplainOptions options, final String query, - final Map bindVars) { + final Map bindVars) { + return options.query(query).bindVars(bindVars); + } + + public static ExplainAqlQueryOptions build( + final ExplainAqlQueryOptions options, + final String query, + final Map bindVars) { 
return options.query(query).bindVars(bindVars); } diff --git a/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java index 567c43ca0..1361f9c9d 100644 --- a/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java +++ b/core/src/main/java/com/arangodb/model/arangosearch/ArangoSearchCreateOptions.java @@ -23,6 +23,7 @@ import com.arangodb.entity.ViewType; import com.arangodb.entity.arangosearch.*; import com.arangodb.internal.serde.InternalSerializers; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import java.util.Arrays; @@ -217,7 +218,16 @@ public Collection getLinks() { return links; } + /** + * @deprecated for removal, use {@link #getPrimarySort()} instead + */ + @Deprecated + @JsonIgnore public Collection getPrimarySorts() { + return getPrimarySort(); + } + + public Collection getPrimarySort() { return primarySorts; } diff --git a/docker/start_db.sh b/docker/start_db.sh index 4c0c2e9a2..0ef51fa5b 100755 --- a/docker/start_db.sh +++ b/docker/start_db.sh @@ -2,7 +2,7 @@ # Configuration environment variables: # STARTER_MODE: (single|cluster|activefailover), default single -# DOCKER_IMAGE: ArangoDB docker image, default docker.io/arangodb/arangodb:latest +# DOCKER_IMAGE: ArangoDB docker image, default docker.io/arangodb/enterprise:latest # STARTER_DOCKER_IMAGE: ArangoDB Starter docker image, default docker.io/arangodb/arangodb-starter:latest # SSL: (true|false), default false # ARANGO_LICENSE_KEY: only required for ArangoDB Enterprise @@ -11,7 +11,7 @@ # STARTER_MODE=cluster SSL=true ./start_db.sh STARTER_MODE=${STARTER_MODE:=single} -DOCKER_IMAGE=${DOCKER_IMAGE:=docker.io/arangodb/arangodb:latest} +DOCKER_IMAGE=${DOCKER_IMAGE:=docker.io/arangodb/enterprise:latest} STARTER_DOCKER_IMAGE=${STARTER_DOCKER_IMAGE:=docker.io/arangodb/arangodb-starter:latest} 
SSL=${SSL:=false} COMPRESSION=${COMPRESSION:=false} @@ -66,7 +66,7 @@ docker run -d \ --starter.address="${GW}" \ --docker.image="${DOCKER_IMAGE}" \ --starter.local --starter.mode=${STARTER_MODE} --all.log.level=debug --all.log.output=+ --log.verbose \ - --all.server.descriptors-minimum=1024 --all.javascript.allow-admin-execute=true + --all.server.descriptors-minimum=1024 --all.javascript.allow-admin-execute=true --all.server.maximal-threads=128 wait_server() { diff --git a/docker/start_proxy.sh b/docker/start_proxy.sh new file mode 100755 index 000000000..b4e938684 --- /dev/null +++ b/docker/start_proxy.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +docker run -d \ + -e LOG_LEVEL=Info \ + -e AUTH_USER=user \ + -e AUTH_PASSWORD=password \ + --network=arangodb -p 8888:8888 \ + docker.io/kalaksi/tinyproxy:1.7 diff --git a/driver/pom.xml b/driver/pom.xml index e6d067680..963c2db1f 100644 --- a/driver/pom.xml +++ b/driver/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 arangodb-java-driver @@ -16,7 +15,6 @@ ArangoDB Java Driver - false com.arangodb.driver src/test/**/* diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json index a25499327..94919ac94 100644 --- a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/reflect-config.json @@ -270,13 +270,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "name": "com.arangodb.entity.arangosearch.ConsolidationType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationType", + 
"name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -318,13 +318,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.CursorEntity", + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", + "name": "com.arangodb.entity.CursorEntity", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -377,6 +377,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzerProperties", "allDeclaredFields": true, @@ -437,12 +443,24 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.TransactionEntity", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.ArangoDBEngine", "allDeclaredFields": true, @@ -462,13 +480,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -479,6 +497,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.AqlParseEntity$AstNode", "allDeclaredFields": true, @@ -563,6 +587,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzerProperties", "allDeclaredFields": true, @@ -570,7 +600,7 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionPlan", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -581,6 +611,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.KeyOptions", "allDeclaredFields": true, @@ -618,13 +654,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.StreamTransactionEntity", + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", + "name": "com.arangodb.entity.StreamTransactionEntity", "allDeclaredFields": true, 
"allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1038,13 +1074,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "name": "com.arangodb.entity.arangosearch.ConsolidationType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1055,6 +1091,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.model.TransactionalOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.model.IndexOptions", "allDeclaredFields": true, @@ -1140,19 +1182,19 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionDropOptions", + "name": "com.arangodb.model.CollectionCountOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.LogLevelOptions", + "name": "com.arangodb.model.DocumentDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DBCreateOptions", + "name": "com.arangodb.model.EdgeDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1164,313 +1206,313 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlFunctionDeleteOptions", + "name": "com.arangodb.model.EdgeReplaceOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", + "name": "com.arangodb.model.VertexReplaceOptions", "allDeclaredFields": true, 
"allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserUpdateOptions", + "name": "com.arangodb.model.DocumentUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.StreamTransactionOptions", + "name": "com.arangodb.model.GraphDocumentReadOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentReplaceOptions", + "name": "com.arangodb.model.EdgeUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DatabaseUsersOptions", + "name": "com.arangodb.model.VertexUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ViewRenameOptions", + "name": "com.arangodb.model.DocumentExistsOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", + "name": "com.arangodb.model.DocumentReplaceOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCollectionDropOptions", + "name": "com.arangodb.model.VertexDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionCreateOptions", + "name": "com.arangodb.model.EdgeCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions$Options", + "name": "com.arangodb.model.CollectionTruncateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ViewCreateOptions", + "name": "com.arangodb.model.DocumentCreateOptions", 
"allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionCreateOptions$Options", + "name": "com.arangodb.model.VertexCollectionDropOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", + "name": "com.arangodb.model.LogLevelOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryParseOptions", + "name": "com.arangodb.model.DBCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserAccessOptions", + "name": "com.arangodb.model.AqlFunctionDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ComputedValue", + "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCollectionRemoveOptions", + "name": "com.arangodb.model.StreamTransactionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", + "name": "com.arangodb.model.UserUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeUpdateOptions", + "name": "com.arangodb.model.DatabaseUsersOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionCreateOptions", + "name": "com.arangodb.model.ViewRenameOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.model.arangosearch.ArangoSearchCreateOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexDeleteOptions", + "name": "com.arangodb.model.EdgeCollectionDropOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCreateOptions", + "name": "com.arangodb.model.CollectionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphCreateOptions", + "name": "com.arangodb.model.AqlQueryExplainOptions$Options", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionsReadOptions", + "name": "com.arangodb.model.ViewCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.TransactionOptions", + "name": "com.arangodb.model.VertexCollectionCreateOptions$Options", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionTruncateOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionSchema", + "name": "com.arangodb.model.AqlQueryParseOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentImportOptions", + "name": "com.arangodb.model.UserAccessOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentDeleteOptions", + "name": "com.arangodb.model.ComputedValue", "allDeclaredFields": true, 
"allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeReplaceOptions", + "name": "com.arangodb.model.EdgeCollectionRemoveOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexReplaceOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphDocumentReadOptions", + "name": "com.arangodb.model.VertexCollectionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentUpdateOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexUpdateOptions", + "name": "com.arangodb.model.GraphCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.OptionsBuilder", + "name": "com.arangodb.model.CollectionsReadOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.TransactionCollectionOptions", + "name": "com.arangodb.model.TransactionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.LogOptions", + "name": "com.arangodb.model.DocumentImportOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", + "name": "com.arangodb.model.CollectionSchema", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserCreateOptions", + "name": "com.arangodb.model.OptionsBuilder", 
"allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionPropertiesOptions", + "name": "com.arangodb.model.TransactionCollectionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DatabaseOptions", + "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionRemoveOptions", + "name": "com.arangodb.model.LogOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentCreateOptions", + "name": "com.arangodb.model.CollectionPropertiesOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", + "name": "com.arangodb.model.UserCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionCountOptions", + "name": "com.arangodb.model.DatabaseOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeDeleteOptions", + "name": "com.arangodb.model.VertexCollectionRemoveOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", + "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionRenameOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.model.arangosearch.SearchAliasCreateOptions", + "name": "com.arangodb.model.CollectionRenameOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentExistsOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1482,25 +1524,25 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryOptionsTest", + "name": "com.arangodb.model.AqlFunctionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlFunctionCreateOptions", + "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", + "name": "com.arangodb.model.AqlQueryExplainOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions", + "name": "com.arangodb.model.ExplainAqlQueryOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1607,4 +1649,4 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true } -] +] \ No newline at end of file diff --git a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json index 7160b9bd4..e5d77727d 100644 --- a/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json +++ b/driver/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver/serialization-config.json @@ -10,5 +10,17 @@ }, { "name": 
"com.arangodb.internal.net.ArangoDBRedirectException" + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument" + }, + { + "name": "com.arangodb.entity.BaseDocument" + }, + { + "name": "com.arangodb.entity.BaseEdgeDocument" + }, + { + "name": "java.util.HashMap" } ] diff --git a/http-protocol/pom.xml b/http-protocol/pom.xml index 7a8e90328..ccf335da7 100644 --- a/http-protocol/pom.xml +++ b/http-protocol/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 http-protocol @@ -17,7 +16,6 @@ com.arangodb.http - false diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java b/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java index bb3e0f441..0efce80f6 100644 --- a/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java +++ b/http-protocol/src/main/java/com/arangodb/http/HttpConnection.java @@ -29,6 +29,7 @@ import com.arangodb.internal.RequestType; import com.arangodb.internal.config.ArangoConfig; import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionPool; import com.arangodb.internal.serde.ContentTypeFactory; import com.arangodb.internal.util.EncodeUtils; import io.netty.handler.ssl.ApplicationProtocolConfig; @@ -39,6 +40,7 @@ import io.vertx.core.Vertx; import io.vertx.core.VertxOptions; import io.vertx.core.buffer.Buffer; +import io.vertx.core.http.Http2Settings; import io.vertx.core.http.HttpHeaders; import io.vertx.core.http.HttpMethod; import io.vertx.core.http.HttpVersion; @@ -54,7 +56,6 @@ import org.slf4j.LoggerFactory; import javax.net.ssl.SSLContext; -import java.security.NoSuchAlgorithmException; import java.util.Collections; import java.util.Iterator; import java.util.Map.Entry; @@ -63,6 +64,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static com.arangodb.internal.net.ConnectionPoolImpl.HTTP1_SLOTS_PIPELINING; +import static 
com.arangodb.internal.net.ConnectionPoolImpl.HTTP2_SLOTS; + /** * @author Mark Vollmary @@ -81,14 +85,16 @@ public class HttpConnection implements Connection { private final WebClient client; private final Integer timeout; private final MultiMap commonHeaders = MultiMap.caseInsensitiveMultiMap(); + private final Vertx vertx; private final Vertx vertxToClose; + private final ConnectionPool pool; private static String getUserAgent() { return "JavaDriver/" + PackageVersion.VERSION + " (JVM/" + System.getProperty("java.specification.version") + ")"; } - HttpConnection(final ArangoConfig config, final HostDescription host, final Vertx existingVertx) { - super(); + HttpConnection(final ArangoConfig config, final HttpProtocolConfig protocolConfig, final HostDescription host, final ConnectionPool pool) { + this.pool = pool; Protocol protocol = config.getProtocol(); ContentType contentType = ContentTypeFactory.of(protocol); if (contentType == ContentType.VPACK) { @@ -112,20 +118,19 @@ private static String getUserAgent() { config.getUser(), Optional.ofNullable(config.getPassword()).orElse("") ).toHttpAuthorization(); - Vertx vertxToUse; - if (existingVertx != null) { + if (protocolConfig.getVertx() != null) { // reuse existing Vert.x - vertxToUse = existingVertx; + vertx = protocolConfig.getVertx(); // Vert.x will not be closed when connection is closed vertxToClose = null; LOGGER.debug("Reusing existing Vert.x instance"); } else { // create a new Vert.x instance LOGGER.debug("Creating new Vert.x instance"); - vertxToUse = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true).setEventLoopPoolSize(1)); - vertxToUse.runOnContext(e -> Thread.currentThread().setName("adb-http-" + THREAD_COUNT.getAndIncrement())); + vertx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true).setEventLoopPoolSize(1)); + vertx.runOnContext(e -> Thread.currentThread().setName("adb-http-" + THREAD_COUNT.getAndIncrement())); // Vert.x be closed when connection is closed - 
vertxToClose = vertxToUse; + vertxToClose = vertx; } int intTtl = Optional.ofNullable(config.getConnectionTtl()) @@ -148,30 +153,25 @@ private static String getUserAgent() { .setLogActivity(true) .setKeepAlive(true) .setTcpKeepAlive(true) - .setPipelining(true) + .setPipelining(config.getPipelining()) + .setPipeliningLimit(HTTP1_SLOTS_PIPELINING) + .setHttp2MultiplexingLimit(HTTP2_SLOTS) .setReuseAddress(true) .setReusePort(true) .setHttp2ClearTextUpgrade(false) .setProtocolVersion(httpVersion) .setDefaultHost(host.getHost()) - .setDefaultPort(host.getPort()); + .setDefaultPort(host.getPort()) + .setProxyOptions(protocolConfig.getProxyOptions()) + .setHttp2ConnectionWindowSize(config.getConnectionWindowSize()) + .setInitialSettings(new Http2Settings().setInitialWindowSize(config.getInitialWindowSize())); if (compression != Compression.NONE) { webClientOptions.setTryUseCompression(true); } if (Boolean.TRUE.equals(config.getUseSsl())) { - SSLContext ctx; - if (config.getSslContext() != null) { - ctx = config.getSslContext(); - } else { - try { - ctx = SSLContext.getDefault(); - } catch (NoSuchAlgorithmException e) { - throw ArangoDBException.of(e); - } - } - + SSLContext ctx = config.getSslContext(); webClientOptions .setSsl(true) .setUseAlpn(true) @@ -204,7 +204,7 @@ public SslContextFactory sslContextFactory() { }); } - client = WebClient.create(vertxToUse, webClientOptions); + client = WebClient.create(vertx, webClientOptions); } private static String buildUrl(final InternalRequest request) { @@ -264,6 +264,11 @@ private HttpMethod requestTypeToHttpMethod(RequestType requestType) { } } + @Override + public void release() { + vertx.runOnContext(__ -> pool.release(this)); + } + @Override @UnstableApi public CompletableFuture executeAsync(@UnstableApi final InternalRequest request) { @@ -272,7 +277,7 @@ public CompletableFuture executeAsync(@UnstableApi final Inter return rfuture; } - public void doExecute(@UnstableApi final InternalRequest request, @UnstableApi 
final CompletableFuture rfuture) { + private void doExecute(@UnstableApi final InternalRequest request, @UnstableApi final CompletableFuture rfuture) { String path = buildUrl(request); HttpRequest httpRequest = client .request(requestTypeToHttpMethod(request.getRequestType()), path) diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java b/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java index edde3da19..72c8c9086 100644 --- a/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java +++ b/http-protocol/src/main/java/com/arangodb/http/HttpConnectionFactory.java @@ -26,6 +26,7 @@ import com.arangodb.internal.config.ArangoConfig; import com.arangodb.internal.net.Connection; import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.ConnectionPool; import io.vertx.core.Vertx; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,12 +35,11 @@ public class HttpConnectionFactory implements ConnectionFactory { private final Logger LOGGER = LoggerFactory.getLogger(HttpConnectionFactory.class); - private final Vertx vertx; + final HttpProtocolConfig protocolConfig; - public HttpConnectionFactory(@UnstableApi final HttpProtocolConfig config) { - HttpProtocolConfig cfg = config != null ? config : HttpProtocolConfig.builder().build(); - vertx = cfg.getVertx(); - if (vertx == null && !PackageVersion.SHADED && Vertx.currentContext() != null) { + public HttpConnectionFactory(@UnstableApi final HttpProtocolConfig cfg) { + protocolConfig = cfg != null ? 
cfg : HttpProtocolConfig.builder().build(); + if (protocolConfig.getVertx() == null && !PackageVersion.SHADED && Vertx.currentContext() != null) { LOGGER.warn("Found an existing Vert.x instance, you can reuse it by setting:\n" + "new ArangoDB.Builder()\n" + " // ...\n" + @@ -50,7 +50,9 @@ public HttpConnectionFactory(@UnstableApi final HttpProtocolConfig config) { @Override @UnstableApi - public Connection create(@UnstableApi final ArangoConfig config, final HostDescription host) { - return new HttpConnection(config, host, vertx); + public Connection create(@UnstableApi final ArangoConfig config, + final HostDescription host, + @UnstableApi final ConnectionPool pool) { + return new HttpConnection(config, protocolConfig, host, pool); } } diff --git a/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java index 26c47f825..7a62dc505 100644 --- a/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java +++ b/http-protocol/src/main/java/com/arangodb/http/HttpProtocolConfig.java @@ -2,9 +2,11 @@ import com.arangodb.config.ProtocolConfig; import io.vertx.core.Vertx; +import io.vertx.core.net.ProxyOptions; public final class HttpProtocolConfig implements ProtocolConfig { private final Vertx vertx; + private final ProxyOptions proxyOptions; public static Builder builder() { return new Builder(); @@ -12,6 +14,7 @@ public static Builder builder() { public static class Builder { private Vertx vertx; + private ProxyOptions proxyOptions; private Builder() { } @@ -27,16 +30,30 @@ public Builder vertx(Vertx vertx) { return this; } + /** + * @param proxyOptions proxy options for HTTP connections + * @return this builder + */ + public Builder proxyOptions(ProxyOptions proxyOptions) { + this.proxyOptions = proxyOptions; + return this; + } + public HttpProtocolConfig build() { - return new HttpProtocolConfig(vertx); + return new HttpProtocolConfig(vertx, proxyOptions); } } - private 
HttpProtocolConfig(Vertx vertx) { + private HttpProtocolConfig(Vertx vertx, ProxyOptions proxyOptions) { this.vertx = vertx; + this.proxyOptions = proxyOptions; } public Vertx getVertx() { return vertx; } + + public ProxyOptions getProxyOptions() { + return proxyOptions; + } } diff --git a/jackson-serde-json/pom.xml b/jackson-serde-json/pom.xml index d5beb16a1..2210e0a14 100644 --- a/jackson-serde-json/pom.xml +++ b/jackson-serde-json/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 jackson-serde-json @@ -17,7 +16,6 @@ com.arangodb.serde.jackson.json - false diff --git a/jackson-serde-vpack/pom.xml b/jackson-serde-vpack/pom.xml index aba611ba9..80c29f18c 100644 --- a/jackson-serde-vpack/pom.xml +++ b/jackson-serde-vpack/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 jackson-serde-vpack @@ -17,7 +16,6 @@ com.arangodb.serde.jackson.vpack - false diff --git a/jsonb-serde/pom.xml b/jsonb-serde/pom.xml index 17f437e0c..e6d9d155b 100644 --- a/jsonb-serde/pom.xml +++ b/jsonb-serde/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 jsonb-serde @@ -17,7 +16,6 @@ com.arangodb.serde.jsonb - false diff --git a/pom.xml b/pom.xml index 2caf6123e..2d3e4e286 100644 --- a/pom.xml +++ b/pom.xml @@ -5,10 +5,9 @@ com.arangodb arangodb-java-driver-parent - 7.10.0 + 7.23.0 2016 - release-parent core driver shaded @@ -37,12 +36,12 @@ 8 8 UTF-8 - true https://sonarcloud.io arangodb-1 target/spotbugsXml.xml site/jacoco/jacoco.xml - 23.1.1 + 25.0.0 + @@ -76,26 +75,203 @@ test-functional test-non-functional test-resilience + test-perf + + static-code-analysis + + + + com.github.spotbugs + spotbugs-maven-plugin + 4.9.6.0 + + spotbugs/spotbugs-exclude.xml + + + + compile + + check + + + + + + com.github.spotbugs + spotbugs + 4.7.3 + + + + + org.jacoco + 
jacoco-maven-plugin + + + + report + + + + + ../test-functional/target/jacoco.exec + + XML + + + + + + + + release + + + maven.test.skip + true + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.3.1 + + + + jar + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + + jar + + + + com.arangodb.internal, + com.arangodb.internal.*, + com.arangodb.serde.jackson.internal, + javax.* + + none + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 3.2.8 + + + --pinentry-mode + loopback + + + + + sign-artifacts + verify + + sign + + + + + + org.codehaus.mojo + flatten-maven-plugin + + oss + + + + + flatten + package + + flatten + + + + + flatten.clean + clean + + clean + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + ${project.name} + ${project.version} + ${moduleName} + + + true + + + + org.apache.maven.plugins + maven-clean-plugin + + + + ${project.basedir} + + **/dependency-reduced-pom.xml + + + + + + + org.sonatype.central + central-publishing-maven-plugin + 0.9.0 + true + + central + true + published + + + + + - - io.vertx - vertx-stack-depchain - 4.5.7 - pom - import - com.fasterxml.jackson jackson-bom - 2.18.0 + 2.20.0 import pom + + io.vertx + vertx-stack-depchain + 4.5.21 + pom + import + com.google.code.findbugs jsr305 @@ -104,7 +280,12 @@ org.slf4j slf4j-api - 2.0.9 + 2.0.17 + + + jakarta.json + jakarta.json-api + 2.1.3 com.arangodb @@ -139,7 +320,7 @@ com.arangodb jackson-dataformat-velocypack - 4.4.0 + 4.6.2 com.arangodb @@ -172,7 +353,7 @@ org.apache.maven.plugins maven-enforcer-plugin - 3.3.0 + 3.6.2 enforce @@ -185,6 +366,7 @@ compile 1.8 + jakarta.json:jakarta.json-api jakarta.json.bind:jakarta.json.bind-api @@ -192,7 +374,7 @@ - 3.6 + 3.6.3 @@ -202,39 +384,72 @@ org.codehaus.mojo extra-enforcer-rules - 1.8.0 + 1.11.0 - org.apache.maven.plugins - maven-deploy-plugin - 3.1.1 - - 10 - - - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.13 - true + org.codehaus.mojo + versions-maven-plugin + 2.19.1 - ossrh - 
https://oss.sonatype.org/ - 84aff6e87e214c - false - ${maven.deploy.skip} + + + + + + regex + (?i).*(alpha|beta|m|rc).*(\d+)? + + + + + io.vertx + + + regex + 5..* + + + + + io.netty + + + regex + .* + + + + + org.junit.jupiter + + + regex + 6..* + + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + org.apache.maven.plugins maven-compiler-plugin - 3.11.0 + 3.14.1 -Xlint:unchecked @@ -254,22 +469,22 @@ org.apache.maven.plugins maven-clean-plugin - 3.4.0 + 3.5.0 org.apache.maven.plugins maven-install-plugin - 3.1.2 + 3.1.4 org.apache.maven.plugins maven-site-plugin - 3.12.1 + 3.21.0 org.apache.maven.plugins maven-surefire-plugin - 3.0.0 + 3.5.4 true @@ -277,12 +492,12 @@ org.codehaus.mojo build-helper-maven-plugin - 3.3.0 + 3.6.1 org.jacoco jacoco-maven-plugin - 0.8.12 + 0.8.13 org.apache.maven.plugins @@ -292,12 +507,12 @@ org.sonarsource.scanner.maven sonar-maven-plugin - 4.0.0.4121 + 5.2.0.4988 org.apache.maven.plugins maven-shade-plugin - 3.4.1 + 3.6.1 com.google.code.maven-replacer-plugin @@ -307,22 +522,34 @@ org.apache.maven.plugins maven-surefire-report-plugin - 3.4.0 + 3.5.4 + + + org.codehaus.mojo + flatten-maven-plugin + 1.7.3 + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.12.0 - - - ossrh - https://oss.sonatype.org/content/repositories/snapshots - + - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ + oss.sonatype.org-snapshot + https://oss.sonatype.org/content/repositories/snapshots + + false + + + true + - + https://github.com/arangodb/arangodb-java-driver diff --git a/release-parent/pom.xml b/release-parent/pom.xml deleted file mode 100644 index 799861c37..000000000 --- a/release-parent/pom.xml +++ /dev/null @@ -1,184 +0,0 @@ - - - 4.0.0 - - com.arangodb - arangodb-java-driver-parent - 7.10.0 - - pom - - release-parent - release-parent - Parent for releasable modules - - - - true - - - - - - org.apache.maven.plugins - maven-source-plugin - 3.2.1 - - - - jar - - - - - - org.apache.maven.plugins 
- maven-javadoc-plugin - 3.8.0 - - - - jar - - - - com.arangodb.internal, - com.arangodb.internal.*, - com.arangodb.serde.jackson.internal, - javax.* - - none - - - - - - org.apache.maven.plugins - maven-gpg-plugin - 3.0.1 - - - --pinentry-mode - loopback - - - - - sign-artifacts - verify - - sign - - - - - - org.codehaus.mojo - flatten-maven-plugin - 1.4.1 - - oss - - - - - flatten - package - - flatten - - - - - flatten.clean - clean - - clean - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - ${project.name} - ${project.version} - ${moduleName} - - - true - - - - org.apache.maven.plugins - maven-clean-plugin - - - - ${project.basedir} - - **/dependency-reduced-pom.xml - - - - - - - - - - - static-code-analysis - - - - com.github.spotbugs - spotbugs-maven-plugin - 4.7.3.4 - - spotbugs/spotbugs-exclude.xml - - - - compile - - check - - - - - - com.github.spotbugs - spotbugs - 4.7.3 - - - - - org.jacoco - jacoco-maven-plugin - - - - report - - - - - ../test-functional/target/jacoco.exec - - XML - - - - - - - - - \ No newline at end of file diff --git a/shaded/pom.xml b/shaded/pom.xml index 3386da2fd..6c970dd65 100644 --- a/shaded/pom.xml +++ b/shaded/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 arangodb-java-driver-shaded @@ -17,7 +16,6 @@ com.arangodb.driver - false src/main/java/graal/**/* @@ -47,6 +45,11 @@ slf4j-api compile + + jakarta.json + jakarta.json-api + compile + com.arangodb @@ -82,6 +85,7 @@ org.slf4j:slf4j-api + jakarta.json:jakarta.json-api @@ -158,6 +162,12 @@ META-INF/** + + com.fasterxml.jackson.datatype:jackson-datatype-jakarta-jsonp + + META-INF/MANIFEST.MF + + @@ -195,7 +205,7 @@ - ${project.parent.parent.basedir}/driver/target/arangodb-java-driver-${project.version}-javadoc.jar + ${project.parent.basedir}/driver/target/arangodb-java-driver-${project.version}-javadoc.jar jar javadoc diff --git 
a/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java b/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java index fb9b8b1bb..d910cf1e7 100644 --- a/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java +++ b/shaded/src/main/java/graal/netty/graal/NettySubstitutions.java @@ -1,45 +1,71 @@ package graal.netty.graal; +import static io.netty.handler.codec.http.HttpHeaderValues.BR; +import static io.netty.handler.codec.http.HttpHeaderValues.DEFLATE; +import static io.netty.handler.codec.http.HttpHeaderValues.GZIP; +import static io.netty.handler.codec.http.HttpHeaderValues.X_DEFLATE; +import static io.netty.handler.codec.http.HttpHeaderValues.X_GZIP; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.InvalidAlgorithmParameterException; +import java.security.KeyException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.Provider; +import java.security.SecureRandom; +import java.security.cert.X509Certificate; +import java.security.spec.InvalidKeySpecException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.function.BooleanSupplier; + +import javax.crypto.NoSuchPaddingException; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; + import com.oracle.svm.core.annotate.Alias; import com.oracle.svm.core.annotate.RecomputeFieldValue; import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; + import graal.netty.EmptyByteBufStub; import io.netty.bootstrap.AbstractBootstrapConfig; import io.netty.bootstrap.ChannelFactory; import io.netty.buffer.ByteBuf; import 
io.netty.buffer.ByteBufAllocator; -import io.netty.channel.*; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.DefaultChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.BrotliDecoder; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; -import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http2.Http2Exception; -import io.netty.handler.ssl.*; +import io.netty.handler.ssl.ApplicationProtocolConfig; import io.netty.handler.ssl.ApplicationProtocolConfig.SelectorFailureBehavior; +import io.netty.handler.ssl.CipherSuiteFilter; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator; +import io.netty.handler.ssl.JdkApplicationProtocolNegotiator; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextOption; +import io.netty.handler.ssl.SslProvider; import io.netty.util.concurrent.GlobalEventExecutor; import io.netty.util.internal.logging.InternalLoggerFactory; import io.netty.util.internal.logging.JdkLoggerFactory; -import javax.crypto.NoSuchPaddingException; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLException; -import javax.net.ssl.TrustManagerFactory; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.security.*; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.util.*; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.function.BooleanSupplier; - -import static io.netty.handler.codec.http.HttpHeaderValues.*; - /** * This substitution avoid having loggers added to the build */ @@ -140,15 +166,14 @@ 
public static boolean isCipherSuiteAvailable(String cipherSuite) { @TargetClass(className = "io.netty.handler.ssl.JdkSslServerContext") final class Target_io_netty_handler_ssl_JdkSslServerContext { - @Alias Target_io_netty_handler_ssl_JdkSslServerContext(Provider provider, - X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, - X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, - KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, - ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, - ClientAuth clientAuth, String[] protocols, boolean startTls, - String keyStore) + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, boolean startTls, + SecureRandom secureRandom, String keyStore, Target_io_netty_handler_ssl_ResumptionController resumptionController) throws SSLException { } } @@ -157,13 +182,13 @@ final class Target_io_netty_handler_ssl_JdkSslServerContext { final class Target_io_netty_handler_ssl_JdkSslClientContext { @Alias - Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, X509Certificate[] trustCertCollection, - TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, - String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, - CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, - long sessionCacheSize, long sessionTimeout, String keyStoreType) - throws SSLException { - + Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] 
keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long sessionTimeout, + SecureRandom secureRandom, String keyStoreType, String endpointIdentificationAlgorithm, + Target_io_netty_handler_ssl_ResumptionController resumptionController) throws SSLException { } } @@ -183,7 +208,7 @@ final class Target_io_netty_handler_ssl_SslHandler$SslEngineType { final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator_AlpnWrapper { @Substitute public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, - JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { + JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { return (SSLEngine) (Object) new Target_io_netty_handler_ssl_JdkAlpnSslEngine(engine, applicationNegotiator, isServer); } @@ -194,7 +219,16 @@ public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { @Alias Target_io_netty_handler_ssl_JdkAlpnSslEngine(final SSLEngine engine, - final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + + } +} + +@TargetClass(className = "io.netty.handler.ssl.ResumptionController") +final class Target_io_netty_handler_ssl_ResumptionController { + + @Alias + Target_io_netty_handler_ssl_ResumptionController() { } } @@ -203,37 +237,42 @@ final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { final class Target_io_netty_handler_ssl_SslContext { @Substitute - static SslContext newServerContextInternal(SslProvider provider, Provider sslContextProvider, - X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, - X509Certificate[] keyCertChain, - PrivateKey key, String keyPassword, 
KeyManagerFactory keyManagerFactory, Iterable ciphers, - CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, - ClientAuth clientAuth, String[] protocols, boolean startTls, boolean enableOcsp, String keyStoreType, - Map.Entry, Object>... ctxOptions) throws SSLException { + static SslContext newServerContextInternal(SslProvider provider, + Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, + Iterable ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, + long sessionCacheSize, long sessionTimeout, ClientAuth clientAuth, String[] protocols, boolean startTls, + boolean enableOcsp, SecureRandom secureRandom, String keyStoreType, + Map.Entry, Object>... ctxOptions) throws SSLException { if (enableOcsp) { throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); } + Target_io_netty_handler_ssl_ResumptionController resumptionController = new Target_io_netty_handler_ssl_ResumptionController(); return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslServerContext(sslContextProvider, trustCertCollection, trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory, ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout, - clientAuth, protocols, startTls, keyStoreType); + clientAuth, protocols, startTls, secureRandom, keyStoreType, resumptionController); } @Substitute - static SslContext newClientContextInternal(SslProvider provider, Provider sslContextProvider, - X509Certificate[] trustCert, - TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, - KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, - ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long 
sessionTimeout, - boolean enableOcsp, - String keyStoreType, Map.Entry, Object>... options) throws SSLException { + static SslContext newClientContextInternal(SslProvider provider, + Provider sslContextProvider, + X509Certificate[] trustCert, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, + Iterable ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, + long sessionCacheSize, long sessionTimeout, boolean enableOcsp, + SecureRandom secureRandom, String keyStoreType, String endpointIdentificationAlgorithm, + Map.Entry, Object>... options) throws SSLException { if (enableOcsp) { throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); } + Target_io_netty_handler_ssl_ResumptionController resumptionController = new Target_io_netty_handler_ssl_ResumptionController(); return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslClientContext(sslContextProvider, trustCert, trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory, ciphers, cipherFilter, apn, protocols, sessionCacheSize, - sessionTimeout, keyStoreType); + sessionTimeout, secureRandom, keyStoreType, endpointIdentificationAlgorithm, + resumptionController); } } @@ -368,22 +407,6 @@ private static Queue newTaskQueue0(int maxPendingTasks) { } } -@TargetClass(className = "io.netty.buffer.AbstractReferenceCountedByteBuf") -final class Target_io_netty_buffer_AbstractReferenceCountedByteBuf { - - @Alias - @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") - private static long REFCNT_FIELD_OFFSET; -} - -@TargetClass(className = "io.netty.util.AbstractReferenceCounted") -final class Target_io_netty_util_AbstractReferenceCounted { - - @Alias - @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") - private static long REFCNT_FIELD_OFFSET; -} - // This class is runtime-initialized by NettyProcessor 
final class Holder_io_netty_util_concurrent_ScheduledFutureTask { static final long START_TIME = System.nanoTime(); @@ -433,11 +456,11 @@ static Class tryToLoadClass(final ClassLoader loader, final Class helper) final class Target_io_netty_buffer_EmptyByteBuf { @Alias - @RecomputeFieldValue(kind = Kind.Reset) + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset) private static ByteBuffer EMPTY_BYTE_BUFFER; @Alias - @RecomputeFieldValue(kind = Kind.Reset) + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset) private static long EMPTY_BYTE_BUFFER_ADDRESS; @Substitute @@ -494,6 +517,10 @@ protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Excep return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } // 'identity' or unsupported return null; @@ -509,20 +536,23 @@ final class Target_io_netty_handler_codec_http2_DelegatingDecompressorFrameListe @Substitute protected EmbeddedChannel newContentDecompressor(ChannelHandlerContext ctx, CharSequence contentEncoding) throws Http2Exception { - if (!HttpHeaderValues.GZIP.contentEqualsIgnoreCase(contentEncoding) - && !HttpHeaderValues.X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { - if (!HttpHeaderValues.DEFLATE.contentEqualsIgnoreCase(contentEncoding) - && !HttpHeaderValues.X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { - return null; - } else { - ZlibWrapper wrapper = this.strict ? 
ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; - return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), - new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(wrapper) }); - } - } else { - return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), - new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP) }); + if (GZIP.contentEqualsIgnoreCase(contentEncoding) || X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP)); } + if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + final ZlibWrapper wrapper = strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + // To be strict, 'deflate' means ZLIB, but some servers were not implemented correctly. 
+ return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); + } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } + + // 'identity' or unsupported + return null; } } @@ -588,6 +618,14 @@ private static PrivateKey getPrivateKeyFromByteBuffer(ByteBuf encodedKeyBuf, Str } } +@TargetClass(className = "io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess") +final class Target_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) + public static int LONG_ELEMENT_SHIFT; +} + class IsBouncyNotThere implements BooleanSupplier { @Override diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json index a25499327..94919ac94 100644 --- a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/reflect-config.json @@ -270,13 +270,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "name": "com.arangodb.entity.arangosearch.ConsolidationType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": 
true @@ -318,13 +318,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.CursorEntity", + "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.GeoPointAnalyzerProperties", + "name": "com.arangodb.entity.CursorEntity", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -377,6 +377,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionStats", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.arangosearch.analyzer.StopwordsAnalyzerProperties", "allDeclaredFields": true, @@ -437,12 +443,24 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionVariable", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.TransactionEntity", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.ArangoDBEngine", "allDeclaredFields": true, @@ -462,13 +480,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", + "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.DelimiterAnalyzerProperties", + "name": "com.arangodb.entity.AqlExecutionExplainEntity$ExecutionStats", "allDeclaredFields": true, 
"allDeclaredMethods": true, "allDeclaredConstructors": true @@ -479,6 +497,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionNode", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.AqlParseEntity$AstNode", "allDeclaredFields": true, @@ -563,6 +587,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.arangosearch.analyzer.MinHashAnalyzerProperties", "allDeclaredFields": true, @@ -570,7 +600,7 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationPolicy", + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionPlan", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -581,6 +611,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.entity.AqlQueryExplainEntity$ExecutionCollection", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.entity.KeyOptions", "allDeclaredFields": true, @@ -618,13 +654,13 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.StreamTransactionEntity", + "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.analyzer.WildcardAnalyzerProperties", + "name": "com.arangodb.entity.StreamTransactionEntity", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1038,13 +1074,13 @@ "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", + "name": "com.arangodb.entity.arangosearch.ConsolidationType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.entity.arangosearch.ConsolidationType", + "name": "com.arangodb.entity.arangosearch.analyzer.GeoS2AnalyzerProperties$GeoS2AnalyzerType", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1055,6 +1091,12 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true }, + { + "name": "com.arangodb.model.TransactionalOptions", + "allDeclaredFields": true, + "allDeclaredMethods": true, + "allDeclaredConstructors": true + }, { "name": "com.arangodb.model.IndexOptions", "allDeclaredFields": true, @@ -1140,19 +1182,19 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionDropOptions", + "name": "com.arangodb.model.CollectionCountOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.LogLevelOptions", + "name": "com.arangodb.model.DocumentDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DBCreateOptions", + "name": "com.arangodb.model.EdgeDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1164,313 +1206,313 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlFunctionDeleteOptions", + "name": "com.arangodb.model.EdgeReplaceOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", + "name": "com.arangodb.model.VertexReplaceOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserUpdateOptions", + "name": 
"com.arangodb.model.DocumentUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.StreamTransactionOptions", + "name": "com.arangodb.model.GraphDocumentReadOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentReplaceOptions", + "name": "com.arangodb.model.EdgeUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DatabaseUsersOptions", + "name": "com.arangodb.model.VertexUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ViewRenameOptions", + "name": "com.arangodb.model.DocumentExistsOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", + "name": "com.arangodb.model.DocumentReplaceOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCollectionDropOptions", + "name": "com.arangodb.model.VertexDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionCreateOptions", + "name": "com.arangodb.model.EdgeCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions$Options", + "name": "com.arangodb.model.CollectionTruncateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ViewCreateOptions", + "name": "com.arangodb.model.DocumentCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.model.VertexCollectionCreateOptions$Options", + "name": "com.arangodb.model.VertexCollectionDropOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", + "name": "com.arangodb.model.LogLevelOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryParseOptions", + "name": "com.arangodb.model.DBCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserAccessOptions", + "name": "com.arangodb.model.AqlFunctionDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ComputedValue", + "name": "com.arangodb.model.arangosearch.AnalyzerDeleteOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCollectionRemoveOptions", + "name": "com.arangodb.model.StreamTransactionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", + "name": "com.arangodb.model.UserUpdateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeUpdateOptions", + "name": "com.arangodb.model.DatabaseUsersOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionCreateOptions", + "name": "com.arangodb.model.ViewRenameOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.ArangoSearchCreateOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasPropertiesOptions", 
"allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexDeleteOptions", + "name": "com.arangodb.model.EdgeCollectionDropOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeCreateOptions", + "name": "com.arangodb.model.CollectionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphCreateOptions", + "name": "com.arangodb.model.AqlQueryExplainOptions$Options", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionsReadOptions", + "name": "com.arangodb.model.ViewCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.TransactionOptions", + "name": "com.arangodb.model.VertexCollectionCreateOptions$Options", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionTruncateOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchOptionsBuilder", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionSchema", + "name": "com.arangodb.model.AqlQueryParseOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentImportOptions", + "name": "com.arangodb.model.UserAccessOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentDeleteOptions", + "name": "com.arangodb.model.ComputedValue", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeReplaceOptions", + "name": 
"com.arangodb.model.EdgeCollectionRemoveOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexReplaceOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchPropertiesOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphDocumentReadOptions", + "name": "com.arangodb.model.VertexCollectionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentUpdateOptions", + "name": "com.arangodb.model.arangosearch.ArangoSearchCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexUpdateOptions", + "name": "com.arangodb.model.GraphCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.OptionsBuilder", + "name": "com.arangodb.model.CollectionsReadOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.TransactionCollectionOptions", + "name": "com.arangodb.model.TransactionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.LogOptions", + "name": "com.arangodb.model.DocumentImportOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", + "name": "com.arangodb.model.CollectionSchema", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.UserCreateOptions", + "name": "com.arangodb.model.OptionsBuilder", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": 
"com.arangodb.model.CollectionPropertiesOptions", + "name": "com.arangodb.model.TransactionCollectionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DatabaseOptions", + "name": "com.arangodb.model.ReplaceEdgeDefinitionOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.VertexCollectionRemoveOptions", + "name": "com.arangodb.model.LogOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentCreateOptions", + "name": "com.arangodb.model.CollectionPropertiesOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", + "name": "com.arangodb.model.UserCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionCountOptions", + "name": "com.arangodb.model.DatabaseOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.EdgeDeleteOptions", + "name": "com.arangodb.model.VertexCollectionRemoveOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", + "name": "com.arangodb.model.GraphCreateOptions$SmartOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.CollectionRenameOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasOptionsBuilder", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.arangosearch.SearchAliasCreateOptions", + "name": "com.arangodb.model.CollectionRenameOptions", 
"allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.DocumentExistsOptions", + "name": "com.arangodb.model.arangosearch.SearchAliasCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1482,25 +1524,25 @@ "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryOptionsTest", + "name": "com.arangodb.model.AqlFunctionCreateOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlFunctionCreateOptions", + "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions$Optimizer", + "name": "com.arangodb.model.AqlQueryExplainOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true }, { - "name": "com.arangodb.model.AqlQueryExplainOptions", + "name": "com.arangodb.model.ExplainAqlQueryOptions", "allDeclaredFields": true, "allDeclaredMethods": true, "allDeclaredConstructors": true @@ -1607,4 +1649,4 @@ "allDeclaredMethods": true, "allDeclaredConstructors": true } -] +] \ No newline at end of file diff --git a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json index 7160b9bd4..e5d77727d 100644 --- a/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json +++ b/shaded/src/main/resources/META-INF/native-image/com.arangodb/arangodb-java-driver-shaded/serialization-config.json @@ -10,5 +10,17 @@ }, { "name": "com.arangodb.internal.net.ArangoDBRedirectException" + }, + { + "name": "com.arangodb.entity.AbstractBaseDocument" + }, + { + "name": 
"com.arangodb.entity.BaseDocument" + }, + { + "name": "com.arangodb.entity.BaseEdgeDocument" + }, + { + "name": "java.util.HashMap" } ] diff --git a/test-functional/pom.xml b/test-functional/pom.xml index bb960521d..13a830103 100644 --- a/test-functional/pom.xml +++ b/test-functional/pom.xml @@ -8,14 +8,19 @@ ../test-parent com.arangodb test-parent - 7.10.0 + 7.23.0 test-functional - - true - + + + org.eclipse.parsson + parsson + 1.1.7 + test + + @@ -34,10 +39,11 @@ **/CustomSerdeTest.**, - **/SerdeTest.**, - **/SerializableTest.**, + **/CustomSerdeAsyncTest.**, **/JacksonInterferenceTest.**, - **/JacksonRequestContextTest.** + **/JacksonRequestContextTest.**, + **/HttpProxyTest.**, + **/RequestContextTest.** @@ -48,6 +54,18 @@ com.fasterxml.jackson.databind.ObjectNode com.arangodb.shaded.fasterxml.jackson.databind.ObjectNode + + com.fasterxml.jackson.databind.node + com.arangodb.shaded.fasterxml.jackson.databind.node + + + com.fasterxml.jackson.databind.ObjectMapper + com.arangodb.shaded.fasterxml.jackson.databind.ObjectMapper + + + com.fasterxml.jackson.core.JsonProcessingException + com.arangodb.shaded.fasterxml.jackson.core.JsonProcessingException + @@ -101,7 +119,19 @@ - src/test-ssl/java + src/test-ssl/java + + + + no-ssl + + + ssl + !true + + + + src/test/java @@ -117,7 +147,7 @@ org.graalvm.buildtools native-maven-plugin - 0.10.2 + 0.11.1 true @@ -141,6 +171,23 @@ + + no-native + + + native + !true + + + + + io.qameta.allure + allure-junit5 + 2.29.1 + test + + + static-code-analysis diff --git a/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java b/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java index fb9b8b1bb..d910cf1e7 100644 --- a/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java +++ b/test-functional/src/test-default/java/graal/netty/graal/NettySubstitutions.java @@ -1,45 +1,71 @@ package graal.netty.graal; +import static 
io.netty.handler.codec.http.HttpHeaderValues.BR; +import static io.netty.handler.codec.http.HttpHeaderValues.DEFLATE; +import static io.netty.handler.codec.http.HttpHeaderValues.GZIP; +import static io.netty.handler.codec.http.HttpHeaderValues.X_DEFLATE; +import static io.netty.handler.codec.http.HttpHeaderValues.X_GZIP; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.InvalidAlgorithmParameterException; +import java.security.KeyException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.Provider; +import java.security.SecureRandom; +import java.security.cert.X509Certificate; +import java.security.spec.InvalidKeySpecException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.function.BooleanSupplier; + +import javax.crypto.NoSuchPaddingException; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; + import com.oracle.svm.core.annotate.Alias; import com.oracle.svm.core.annotate.RecomputeFieldValue; import com.oracle.svm.core.annotate.RecomputeFieldValue.Kind; import com.oracle.svm.core.annotate.Substitute; import com.oracle.svm.core.annotate.TargetClass; + import graal.netty.EmptyByteBufStub; import io.netty.bootstrap.AbstractBootstrapConfig; import io.netty.bootstrap.ChannelFactory; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.*; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.DefaultChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.Brotli; +import 
io.netty.handler.codec.compression.BrotliDecoder; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; -import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http2.Http2Exception; -import io.netty.handler.ssl.*; +import io.netty.handler.ssl.ApplicationProtocolConfig; import io.netty.handler.ssl.ApplicationProtocolConfig.SelectorFailureBehavior; +import io.netty.handler.ssl.CipherSuiteFilter; +import io.netty.handler.ssl.ClientAuth; +import io.netty.handler.ssl.JdkAlpnApplicationProtocolNegotiator; +import io.netty.handler.ssl.JdkApplicationProtocolNegotiator; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextOption; +import io.netty.handler.ssl.SslProvider; import io.netty.util.concurrent.GlobalEventExecutor; import io.netty.util.internal.logging.InternalLoggerFactory; import io.netty.util.internal.logging.JdkLoggerFactory; -import javax.crypto.NoSuchPaddingException; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLException; -import javax.net.ssl.TrustManagerFactory; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.security.*; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.util.*; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.function.BooleanSupplier; - -import static io.netty.handler.codec.http.HttpHeaderValues.*; - /** * This substitution avoid having loggers added to the build */ @@ -140,15 +166,14 @@ public static boolean isCipherSuiteAvailable(String cipherSuite) { @TargetClass(className = "io.netty.handler.ssl.JdkSslServerContext") final class Target_io_netty_handler_ssl_JdkSslServerContext { - @Alias Target_io_netty_handler_ssl_JdkSslServerContext(Provider provider, - X509Certificate[] trustCertCollection, TrustManagerFactory 
trustManagerFactory, - X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, - KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, - ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, - ClientAuth clientAuth, String[] protocols, boolean startTls, - String keyStore) + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, + ClientAuth clientAuth, String[] protocols, boolean startTls, + SecureRandom secureRandom, String keyStore, Target_io_netty_handler_ssl_ResumptionController resumptionController) throws SSLException { } } @@ -157,13 +182,13 @@ final class Target_io_netty_handler_ssl_JdkSslServerContext { final class Target_io_netty_handler_ssl_JdkSslClientContext { @Alias - Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, X509Certificate[] trustCertCollection, - TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, - String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, - CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, - long sessionCacheSize, long sessionTimeout, String keyStoreType) - throws SSLException { - + Target_io_netty_handler_ssl_JdkSslClientContext(Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, + KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, + ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long sessionTimeout, + SecureRandom secureRandom, String keyStoreType, String endpointIdentificationAlgorithm, + 
Target_io_netty_handler_ssl_ResumptionController resumptionController) throws SSLException { } } @@ -183,7 +208,7 @@ final class Target_io_netty_handler_ssl_SslHandler$SslEngineType { final class Target_io_netty_handler_ssl_JdkAlpnApplicationProtocolNegotiator_AlpnWrapper { @Substitute public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, - JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { + JdkApplicationProtocolNegotiator applicationNegotiator, boolean isServer) { return (SSLEngine) (Object) new Target_io_netty_handler_ssl_JdkAlpnSslEngine(engine, applicationNegotiator, isServer); } @@ -194,7 +219,16 @@ public SSLEngine wrapSslEngine(SSLEngine engine, ByteBufAllocator alloc, final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { @Alias Target_io_netty_handler_ssl_JdkAlpnSslEngine(final SSLEngine engine, - final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + final JdkApplicationProtocolNegotiator applicationNegotiator, final boolean isServer) { + + } +} + +@TargetClass(className = "io.netty.handler.ssl.ResumptionController") +final class Target_io_netty_handler_ssl_ResumptionController { + + @Alias + Target_io_netty_handler_ssl_ResumptionController() { } } @@ -203,37 +237,42 @@ final class Target_io_netty_handler_ssl_JdkAlpnSslEngine { final class Target_io_netty_handler_ssl_SslContext { @Substitute - static SslContext newServerContextInternal(SslProvider provider, Provider sslContextProvider, - X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, - X509Certificate[] keyCertChain, - PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, Iterable ciphers, - CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, long sessionCacheSize, long sessionTimeout, - ClientAuth clientAuth, String[] protocols, boolean startTls, boolean enableOcsp, String keyStoreType, - Map.Entry, Object>... 
ctxOptions) throws SSLException { + static SslContext newServerContextInternal(SslProvider provider, + Provider sslContextProvider, + X509Certificate[] trustCertCollection, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, + Iterable ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, + long sessionCacheSize, long sessionTimeout, ClientAuth clientAuth, String[] protocols, boolean startTls, + boolean enableOcsp, SecureRandom secureRandom, String keyStoreType, + Map.Entry, Object>... ctxOptions) throws SSLException { if (enableOcsp) { throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); } + Target_io_netty_handler_ssl_ResumptionController resumptionController = new Target_io_netty_handler_ssl_ResumptionController(); return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslServerContext(sslContextProvider, trustCertCollection, trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory, ciphers, cipherFilter, apn, sessionCacheSize, sessionTimeout, - clientAuth, protocols, startTls, keyStoreType); + clientAuth, protocols, startTls, secureRandom, keyStoreType, resumptionController); } @Substitute - static SslContext newClientContextInternal(SslProvider provider, Provider sslContextProvider, - X509Certificate[] trustCert, - TrustManagerFactory trustManagerFactory, X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, - KeyManagerFactory keyManagerFactory, Iterable ciphers, CipherSuiteFilter cipherFilter, - ApplicationProtocolConfig apn, String[] protocols, long sessionCacheSize, long sessionTimeout, - boolean enableOcsp, - String keyStoreType, Map.Entry, Object>... 
options) throws SSLException { + static SslContext newClientContextInternal(SslProvider provider, + Provider sslContextProvider, + X509Certificate[] trustCert, TrustManagerFactory trustManagerFactory, + X509Certificate[] keyCertChain, PrivateKey key, String keyPassword, KeyManagerFactory keyManagerFactory, + Iterable ciphers, CipherSuiteFilter cipherFilter, ApplicationProtocolConfig apn, String[] protocols, + long sessionCacheSize, long sessionTimeout, boolean enableOcsp, + SecureRandom secureRandom, String keyStoreType, String endpointIdentificationAlgorithm, + Map.Entry, Object>... options) throws SSLException { if (enableOcsp) { throw new IllegalArgumentException("OCSP is not supported with this SslProvider: " + provider); } + Target_io_netty_handler_ssl_ResumptionController resumptionController = new Target_io_netty_handler_ssl_ResumptionController(); return (SslContext) (Object) new Target_io_netty_handler_ssl_JdkSslClientContext(sslContextProvider, trustCert, trustManagerFactory, keyCertChain, key, keyPassword, keyManagerFactory, ciphers, cipherFilter, apn, protocols, sessionCacheSize, - sessionTimeout, keyStoreType); + sessionTimeout, secureRandom, keyStoreType, endpointIdentificationAlgorithm, + resumptionController); } } @@ -368,22 +407,6 @@ private static Queue newTaskQueue0(int maxPendingTasks) { } } -@TargetClass(className = "io.netty.buffer.AbstractReferenceCountedByteBuf") -final class Target_io_netty_buffer_AbstractReferenceCountedByteBuf { - - @Alias - @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") - private static long REFCNT_FIELD_OFFSET; -} - -@TargetClass(className = "io.netty.util.AbstractReferenceCounted") -final class Target_io_netty_util_AbstractReferenceCounted { - - @Alias - @RecomputeFieldValue(kind = Kind.FieldOffset, name = "refCnt") - private static long REFCNT_FIELD_OFFSET; -} - // This class is runtime-initialized by NettyProcessor final class Holder_io_netty_util_concurrent_ScheduledFutureTask { static final long 
START_TIME = System.nanoTime(); @@ -433,11 +456,11 @@ static Class tryToLoadClass(final ClassLoader loader, final Class helper) final class Target_io_netty_buffer_EmptyByteBuf { @Alias - @RecomputeFieldValue(kind = Kind.Reset) + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset) private static ByteBuffer EMPTY_BYTE_BUFFER; @Alias - @RecomputeFieldValue(kind = Kind.Reset) + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.Reset) private static long EMPTY_BYTE_BUFFER_ADDRESS; @Substitute @@ -494,6 +517,10 @@ protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Excep return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } // 'identity' or unsupported return null; @@ -509,20 +536,23 @@ final class Target_io_netty_handler_codec_http2_DelegatingDecompressorFrameListe @Substitute protected EmbeddedChannel newContentDecompressor(ChannelHandlerContext ctx, CharSequence contentEncoding) throws Http2Exception { - if (!HttpHeaderValues.GZIP.contentEqualsIgnoreCase(contentEncoding) - && !HttpHeaderValues.X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { - if (!HttpHeaderValues.DEFLATE.contentEqualsIgnoreCase(contentEncoding) - && !HttpHeaderValues.X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { - return null; - } else { - ZlibWrapper wrapper = this.strict ? 
ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; - return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), - new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(wrapper) }); - } - } else { - return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), - new ChannelHandler[] { ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP) }); + if (GZIP.contentEqualsIgnoreCase(contentEncoding) || X_GZIP.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP)); } + if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { + final ZlibWrapper wrapper = strict ? ZlibWrapper.ZLIB : ZlibWrapper.ZLIB_OR_NONE; + // To be strict, 'deflate' means ZLIB, but some servers were not implemented correctly. 
+ return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); + } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } + + // 'identity' or unsupported + return null; } } @@ -588,6 +618,14 @@ private static PrivateKey getPrivateKeyFromByteBuffer(ByteBuf encodedKeyBuf, Str } } +@TargetClass(className = "io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess") +final class Target_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = long[].class) + public static int LONG_ELEMENT_SHIFT; +} + class IsBouncyNotThere implements BooleanSupplier { @Override diff --git a/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java b/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java index 092276a81..b454c6111 100644 --- a/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java +++ b/test-functional/src/test-ssl/java/com/arangodb/ArangoSslTest.java @@ -20,6 +20,7 @@ package com.arangodb; +import com.arangodb.config.ArangoConfigProperties; import com.arangodb.entity.ArangoDBVersion; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; @@ -55,6 +56,36 @@ void connect(Protocol protocol) { assertThat(version).isNotNull(); } + @ParameterizedTest + @EnumSource(Protocol.class) + void connectWithCertConf(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + 
.sslCertValue("MIIDezCCAmOgAwIBAgIEeDCzXzANBgkqhkiG9w0BAQsFADBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMjAxMTAxMTg1MTE5WhcNMzAxMDMwMTg1MTE5WjBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1WiDnd4+uCmMG539ZNZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMALX9euSseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7NzmvdogNXoJQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiRdJVPwUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf2MGO64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzbf8KnRY4PAgMBAAGjITAfMB0GA1UdDgQWBBTBrv9Awynt3C5IbaCNyOW5v4DNkTANBgkqhkiG9w0BAQsFAAOCAQEAIm9rPvDkYpmzpSIhR3VXG9Y71gxRDrqkEeLsMoEyqGnw/zx1bDCNeGg2PncLlW6zTIipEBooixIE9U7KxHgZxBy0Et6EEWvIUmnr6F4F+dbTD050GHlcZ7eOeqYTPYeQC502G1Fo4tdNi4lDP9L9XZpf7Q1QimRH2qaLS03ZFZa2tY7ah/RQqZL8Dkxx8/zc25sgTHVpxoK853glBVBs/ENMiyGJWmAXQayewY3EPt/9wGwV4KmU3dPDleQeXSUGPUISeQxFjy+jCw21pYviWVJTNBA9l5ny3GhEmcnOT/gQHCvVRLyGLMbaMZ4JrPwb+aAtBgrgeiK4xeSMMvrbhw==") + .verifyHost(false) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @ParameterizedTest + @EnumSource(Protocol.class) + void connectWithFileProperties(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile("arangodb-ssl.properties")) + .protocol(protocol) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + @ParameterizedTest @EnumSource(Protocol.class) void connectWithoutValidSslContext(Protocol protocol) { diff --git a/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java 
b/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java new file mode 100644 index 000000000..2e74a1fb0 --- /dev/null +++ b/test-functional/src/test-ssl/java/com/arangodb/HttpProxyTest.java @@ -0,0 +1,103 @@ +/* + * DISCLAIMER + * + * Copyright 2016 ArangoDB GmbH, Cologne, Germany + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright holder is ArangoDB GmbH, Cologne, Germany + */ + +package com.arangodb; + +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.http.HttpProtocolConfig; +import io.netty.handler.proxy.ProxyConnectException; +import io.vertx.core.net.ProxyOptions; +import io.vertx.core.net.ProxyType; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * NB: excluded from shaded tests + */ +class HttpProxyTest extends BaseTest { + + @ParameterizedTest + @EnumSource(Protocol.class) + void httpProxy(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .protocolConfig(HttpProtocolConfig.builder() + .proxyOptions(new ProxyOptions() + 
.setType(ProxyType.HTTP) + .setHost("172.28.0.1") + .setPort(8888) + .setUsername("user") + .setPassword("password")) + .build()) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + + @ParameterizedTest + @EnumSource(Protocol.class) + void httpProxyWrongPassword(Protocol protocol) { + assumeTrue(protocol != Protocol.VST); + + final ArangoDB arangoDB = new ArangoDB.Builder() + .protocol(protocol) + .host("172.28.0.1", 8529) + .password("test") + .useSsl(true) + .sslContext(createSslContext()) + .verifyHost(false) + .protocolConfig(HttpProtocolConfig.builder() + .proxyOptions(new ProxyOptions() + .setType(ProxyType.HTTP) + .setHost("172.28.0.1") + .setPort(8888) + .setUsername("user") + .setPassword("wrong")) + .build()) + .build(); + Throwable thrown = catchThrowable(arangoDB::getVersion); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("Cannot contact any host!") + .cause() + .isInstanceOf(ArangoDBMultipleException.class); + List causes = ((ArangoDBMultipleException) thrown.getCause()).getExceptions(); + assertThat(causes).allSatisfy(e -> assertThat(e) + .isInstanceOf(ProxyConnectException.class) + .hasMessageContaining("status: 401 Unauthorized")); + assertThat(causes).isNotEmpty(); + } + +} diff --git a/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java index 866dfe5e4..77737ceef 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoCollectionAsyncTest.java @@ -3057,7 +3057,7 @@ void deleteDocumentsByDocumentOne(ArangoCollectionAsync collection) throws Execu @MethodSource("asyncCols") void deleteDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { final Collection values = new ArrayList<>(); - collection.insertDocuments(values); + 
collection.insertDocuments(values).get(); final Collection keys = new ArrayList<>(); final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys).get(); assertThat(deleteResult).isNotNull(); @@ -3069,7 +3069,7 @@ void deleteDocumentsEmpty(ArangoCollectionAsync collection) throws ExecutionExce @MethodSource("asyncCols") void deleteDocumentsByKeyNotExisting(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { final Collection values = new ArrayList<>(); - collection.insertDocuments(values); + collection.insertDocuments(values).get(); final Collection keys = Arrays.asList(rnd(), rnd()); final MultiDocumentEntity deleteResult = collection.deleteDocuments(keys).get(); @@ -3170,7 +3170,7 @@ void updateDocumentsWithoutKey(ArangoCollectionAsync collection) throws Executio { values.add(new BaseDocument("1")); } - collection.insertDocuments(values); + collection.insertDocuments(values).get(); final Collection updatedValues = new ArrayList<>(); for (final BaseDocument i : values) { i.addAttribute("a", "test"); @@ -3188,7 +3188,7 @@ void updateDocumentsJson(ArangoCollectionAsync collection) throws ExecutionExcep final Collection values = new ArrayList<>(); values.add(RawJson.of("{\"_key\":\"1\"}")); values.add(RawJson.of("{\"_key\":\"2\"}")); - collection.insertDocuments(values); + collection.insertDocuments(values).get(); final Collection updatedValues = new ArrayList<>(); updatedValues.add(RawJson.of("{\"_key\":\"1\", \"foo\":\"bar\"}")); @@ -3263,7 +3263,7 @@ void replaceDocumentsOne(ArangoCollectionAsync collection) throws ExecutionExcep e.setKey("1"); values.add(e); } - collection.insertDocuments(values); + collection.insertDocuments(values).get(); final Collection updatedValues = new ArrayList<>(); final BaseDocument first = values.iterator().next(); first.addAttribute("a", "test"); @@ -3321,7 +3321,7 @@ void replaceDocumentsJson(ArangoCollectionAsync collection) throws ExecutionExce @MethodSource("asyncCols") void 
replaceDocumentsRawData(ArangoCollectionAsync collection) throws ExecutionException, InterruptedException { final RawData values = RawJson.of("[{\"_key\":\"1\"}, {\"_key\":\"2\"}]"); - collection.insertDocuments(values); + collection.insertDocuments(values).get(); final RawData updatedValues = RawJson.of("[{\"_key\":\"1\", \"foo\":\"bar\"}, {\"_key\":\"2\", " + "\"foo\":\"bar\"}]"); diff --git a/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java b/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java index b70da4d4d..aa28ef9c1 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoCollectionTest.java @@ -754,6 +754,34 @@ void getDocuments(ArangoCollection collection) { } } + @ParameterizedTest + @MethodSource("cols") + void getDocumentsUserData(ArangoCollection collection) { + Cat a = new Cat(); + a.setKey(UUID.randomUUID().toString()); + a.setName("a"); + + Cat b = new Cat(); + b.setKey(UUID.randomUUID().toString()); + b.setName("b"); + + final List values = Arrays.asList(a, b); + collection.insertDocuments(values); + final MultiDocumentEntity documents = collection.getDocuments(Arrays.asList(a.getKey(), b.getKey()), + Cat.class); + assertThat(documents).isNotNull(); + assertThat(documents.getDocuments()) + .hasSize(2) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(a.getKey()); + assertThat(d.getName()).isEqualTo(a.getName()); + }) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(b.getKey()); + assertThat(d.getName()).isEqualTo(b.getName()); + }); + } + @ParameterizedTest @MethodSource("cols") void getDocumentsWithCustomShardingKey(ArangoCollection c) { @@ -2413,6 +2441,33 @@ void insertDocuments(ArangoCollection collection) { assertThat(docs.getErrors()).isEmpty(); } + @ParameterizedTest + @MethodSource("cols") + void insertDocumentsReturnNewUserData(ArangoCollection collection) { + Cat a = new Cat(); + 
a.setKey(UUID.randomUUID().toString()); + a.setName("a"); + + Cat b = new Cat(); + b.setKey(UUID.randomUUID().toString()); + b.setName("b"); + + final List values = Arrays.asList(a, b); + MultiDocumentEntity> res = + collection.insertDocuments(values, new DocumentCreateOptions().returnNew(true), Cat.class); + assertThat(res).isNotNull(); + assertThat(res.getDocuments()) + .hasSize(2) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(a.getKey()); + assertThat(d.getNew().getName()).isEqualTo(a.getName()); + }) + .anySatisfy(d -> { + assertThat(d.getKey()).isEqualTo(b.getKey()); + assertThat(d.getNew().getName()).isEqualTo(b.getName()); + }); + } + @ParameterizedTest @MethodSource("cols") void insertDocumentsOverwriteModeUpdate(ArangoCollection collection) { @@ -2537,9 +2592,10 @@ void insertDocumentsReturnNew(ArangoCollection collection) { for (final DocumentCreateEntity doc : docs.getDocuments()) { assertThat(doc.getNew()).isNotNull(); final BaseDocument baseDocument = doc.getNew(); + assertThat(baseDocument.getId()).isNotNull(); assertThat(baseDocument.getKey()).isNotNull(); + assertThat(baseDocument.getRevision()).isNotNull(); } - } @ParameterizedTest @@ -3397,36 +3453,51 @@ void getPropeties(ArangoCollection collection) { @ParameterizedTest @MethodSource("cols") void changeProperties(ArangoCollection collection) { + assumeTrue(isCluster()); final CollectionPropertiesEntity properties = collection.getProperties(); assertThat(properties.getWaitForSync()).isNotNull(); - if (isAtLeastVersion(3, 7)) { - assertThat(properties.getSchema()).isNull(); - } + assertThat(properties.getSchema()).isNull(); String schemaRule = ("{ " + " \"properties\": {" + " \"number\": {" + " " + " \"type\": \"number\"" + " }" + " }" + " }").replaceAll("\\s", ""); String schemaMessage = "The document has problems!"; CollectionPropertiesOptions updatedOptions = - new CollectionPropertiesOptions().waitForSync(!properties.getWaitForSync()).schema(new 
CollectionSchema().setLevel(CollectionSchema.Level.NEW).setMessage(schemaMessage).setRule(schemaRule)); + new CollectionPropertiesOptions() + .cacheEnabled(!properties.getCacheEnabled()) + .computedValues(new ComputedValue() + .name("foo") + .expression("RETURN 11") + .overwrite(false) + .computeOn(ComputedValue.ComputeOn.insert) + .keepNull(false) + .failOnWarning(true)) + .replicationFactor(ReplicationFactor.of(3)) + .schema(new CollectionSchema().setLevel(CollectionSchema.Level.NEW).setMessage(schemaMessage).setRule(schemaRule)) + .waitForSync(!properties.getWaitForSync()) + .writeConcern(2); final CollectionPropertiesEntity changedProperties = collection.changeProperties(updatedOptions); - assertThat(changedProperties.getWaitForSync()).isNotNull(); - assertThat(changedProperties.getWaitForSync()).isEqualTo(!properties.getWaitForSync()); - if (isAtLeastVersion(3, 7)) { - assertThat(changedProperties.getSchema()).isNotNull(); - assertThat(changedProperties.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); - assertThat(changedProperties.getSchema().getMessage()).isEqualTo(schemaMessage); - assertThat(changedProperties.getSchema().getRule()).isEqualTo(schemaRule); - } + assertThat(changedProperties.getCacheEnabled()).isEqualTo(updatedOptions.getCacheEnabled()); + assertThat(changedProperties.getComputedValues()) + .hasSize(1) + .contains(updatedOptions.getComputedValues().get(0)); + assertThat(changedProperties.getReplicationFactor().get()).isEqualTo(updatedOptions.getReplicationFactor().get()); + assertThat(changedProperties.getSchema().getLevel()).isEqualTo(CollectionSchema.Level.NEW); + assertThat(changedProperties.getSchema().getMessage()).isEqualTo(schemaMessage); + assertThat(changedProperties.getSchema().getRule()).isEqualTo(schemaRule); + assertThat(changedProperties.getWaitForSync()).isEqualTo(updatedOptions.getWaitForSync()); + assertThat(changedProperties.getWriteConcern()).isEqualTo(updatedOptions.getWriteConcern()); // revert changes - 
CollectionPropertiesEntity revertedProperties = collection.changeProperties(new CollectionPropertiesOptions() - .waitForSync(properties.getWaitForSync()).schema(new CollectionSchema())); - if (isAtLeastVersion(3, 7)) { - assertThat(revertedProperties.getSchema()).isNull(); - } - + CollectionPropertiesOptions revertOptions = new CollectionPropertiesOptions() + .cacheEnabled(properties.getCacheEnabled()) + .computedValues() + .replicationFactor(properties.getReplicationFactor()) + .schema(properties.getSchema()) + .waitForSync(properties.getWaitForSync()) + .writeConcern(properties.getWriteConcern()); + collection.changeProperties(revertOptions); } @ParameterizedTest diff --git a/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java b/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java index 2e00339a0..c017ef718 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoConfigTest.java @@ -5,11 +5,15 @@ import com.arangodb.internal.config.ArangoConfig; import org.junit.jupiter.api.Test; +import javax.net.ssl.SSLContext; + +import java.security.NoSuchAlgorithmException; + import static org.assertj.core.api.Assertions.assertThat; public class ArangoConfigTest { @Test - void ArangoConfigDefaultValues() { + void ArangoConfigDefaultValues() throws NoSuchAlgorithmException { ArangoConfig cfg = new ArangoConfig(); assertThat(cfg.getHosts()).isEqualTo(ArangoDefaults.DEFAULT_HOSTS); assertThat(cfg.getProtocol()).isEqualTo(Protocol.HTTP2_JSON); @@ -18,7 +22,7 @@ void ArangoConfigDefaultValues() { assertThat(cfg.getPassword()).isNull(); assertThat(cfg.getJwt()).isNull(); assertThat(cfg.getUseSsl()).isEqualTo(ArangoDefaults.DEFAULT_USE_SSL); - assertThat(cfg.getSslContext()).isNull(); + assertThat(cfg.getSslContext()).isEqualTo(SSLContext.getDefault()); assertThat(cfg.getVerifyHost()).isEqualTo(ArangoDefaults.DEFAULT_VERIFY_HOST); 
assertThat(cfg.getChunkSize()).isEqualTo(ArangoDefaults.DEFAULT_CHUNK_SIZE); assertThat(cfg.getMaxConnections()).isEqualTo(ArangoDefaults.MAX_CONNECTIONS_HTTP2_DEFAULT); diff --git a/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java index 1598a7ca1..47ac152e8 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoDBAsyncTest.java @@ -20,7 +20,6 @@ package com.arangodb; -import com.arangodb.config.ConfigUtils; import com.arangodb.entity.*; import com.arangodb.internal.ArangoRequestParam; import com.arangodb.internal.serde.SerdeUtils; @@ -237,15 +236,19 @@ void createUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedEx assumeTrue(isSingleServer()); String username = "user-" + UUID.randomUUID(); final UserEntity result = arangoDB.createUser(username, PW, null).get(); - assertThat(result.getUser()).isEqualTo(username); + try { + assertThat(result.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void deleteUser(ArangoDBAsync arangoDB) { + void deleteUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String username = "user-" + UUID.randomUUID(); - arangoDB.createUser(username, PW, null); - arangoDB.deleteUser(username); + arangoDB.createUser(username, PW, null).get(); + arangoDB.deleteUser(username).get(); } @ParameterizedTest @@ -261,7 +264,11 @@ void getUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedExcep String username = "user-" + UUID.randomUUID(); arangoDB.createUser(username, PW, null).get(); final UserEntity user = arangoDB.getUser(username).get(); - assertThat(user.getUser()).isEqualTo(username); + try { + assertThat(user.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @@ -279,28 
+286,36 @@ void getUsers(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedExce final Collection initialUsers = arangoDB.getUsers().get(); arangoDB.createUser(username, PW, null).get(); - final Collection users = arangoDB.getUsers().get(); - assertThat(users).hasSize(initialUsers.size() + 1); + try { + final Collection users = arangoDB.getUsers().get(); + assertThat(users).hasSize(initialUsers.size() + 1); - final List expected = new ArrayList<>(users.size()); - // Add initial users, including root: - for (final UserEntity userEntity : initialUsers) { - expected.add(userEntity.getUser()); - } - // Add username: - expected.add(username); + final List expected = new ArrayList<>(users.size()); + // Add initial users, including root: + for (final UserEntity userEntity : initialUsers) { + expected.add(userEntity.getUser()); + } + // Add username: + expected.add(username); - for (final UserEntity user : users) { - assertThat(user.getUser()).isIn(expected); + for (final UserEntity user : users) { + assertThat(user.getUser()).isIn(expected); + } + } finally { + arangoDB.deleteUser(username).get(); } } @ParameterizedTest @MethodSource("asyncArangos") - void updateUserNoOptions(ArangoDBAsync arangoDB) { + void updateUserNoOptions(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String username = "user-" + UUID.randomUUID(); - arangoDB.createUser(username, PW, null); - arangoDB.updateUser(username, null); + arangoDB.createUser(username, PW, null).get(); + try { + arangoDB.updateUser(username, null).get(); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @@ -310,16 +325,20 @@ void updateUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedEx final Map extra = new HashMap<>(); extra.put("hund", false); arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)).get(); - extra.put("hund", true); - extra.put("mund", true); - final UserEntity user = arangoDB.updateUser(username, new 
UserUpdateOptions().extra(extra)).get(); - assertThat(user.getExtra()).hasSize(2); - assertThat(user.getExtra()).containsKey("hund"); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); - final UserEntity user2 = arangoDB.getUser(username).get(); - assertThat(user2.getExtra()).hasSize(2); - assertThat(user2.getExtra()).containsKey("hund"); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + try { + extra.put("hund", true); + extra.put("mund", true); + final UserEntity user = arangoDB.updateUser(username, new UserUpdateOptions().extra(extra)).get(); + assertThat(user.getExtra()).hasSize(2); + assertThat(user.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username).get(); + assertThat(user2.getExtra()).hasSize(2); + assertThat(user2.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @@ -329,32 +348,44 @@ void replaceUser(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedE final Map extra = new HashMap<>(); extra.put("hund", false); arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)).get(); - extra.remove("hund"); - extra.put("mund", true); - final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)).get(); - assertThat(user.getExtra()).hasSize(1); - assertThat(user.getExtra()).containsKey("mund"); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); - final UserEntity user2 = arangoDB.getUser(username).get(); - assertThat(user2.getExtra()).hasSize(1); - assertThat(user2.getExtra()).containsKey("mund"); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + try { + extra.remove("hund"); + extra.put("mund", true); + 
final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)).get(); + assertThat(user.getExtra()).hasSize(1); + assertThat(user.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username).get(); + assertThat(user2.getExtra()).hasSize(1); + assertThat(user2.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void updateUserDefaultDatabaseAccess(ArangoDBAsync arangoDB) { + void updateUserDefaultDatabaseAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String username = "user-" + UUID.randomUUID(); - arangoDB.createUser(username, PW); - arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW); + arangoDB.createUser(username, PW).get(); + try { + arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void updateUserDefaultCollectionAccess(ArangoDBAsync arangoDB) { + void updateUserDefaultCollectionAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String username = "user-" + UUID.randomUUID(); - arangoDB.createUser(username, PW); - arangoDB.grantDefaultCollectionAccess(username, Permissions.RW); + arangoDB.createUser(username, PW).get(); + try { + arangoDB.grantDefaultCollectionAccess(username, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(username).get(); + } } @ParameterizedTest @@ -637,29 +668,6 @@ void arangoDBException(ArangoDBAsync arangoDB) { assertThat(e.getErrorNum()).isEqualTo(1228); } - @ParameterizedTest - @MethodSource("asyncArangos") - void fallbackHost() throws ExecutionException, InterruptedException { - final ArangoDBAsync arangoDB = new 
ArangoDB.Builder() - .loadProperties(config) - .host("not-accessible", 8529).host("172.28.0.1", 8529) - .build() - .async(); - final ArangoDBVersion version = arangoDB.getVersion().get(); - assertThat(version).isNotNull(); - } - - @ParameterizedTest - @MethodSource("asyncArangos") - void loadpropertiesWithPrefix() throws ExecutionException, InterruptedException { - ArangoDBAsync adb = new ArangoDB.Builder() - .loadProperties(ConfigUtils.loadConfig("arangodb-with-prefix.properties", "adb")) - .build() - .async(); - adb.getVersion().get(); - adb.shutdown(); - } - @ParameterizedTest @MethodSource("asyncArangos") void accessMultipleDatabases(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { diff --git a/test-functional/src/test/java/com/arangodb/ArangoDBTest.java b/test-functional/src/test/java/com/arangodb/ArangoDBTest.java index 498f48e4c..030d3d6a7 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoDBTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoDBTest.java @@ -239,7 +239,11 @@ void getAccessibleDatabasesFor(ArangoDB arangoDB) { void createUser(ArangoDB arangoDB) { String username = "user-" + UUID.randomUUID(); final UserEntity result = arangoDB.createUser(username, PW, null); - assertThat(result.getUser()).isEqualTo(username); + try { + assertThat(result.getUser()).isEqualTo(username); + } finally { + arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -281,19 +285,23 @@ void getUsers(ArangoDB arangoDB) { final Collection initialUsers = arangoDB.getUsers(); arangoDB.createUser(username, PW, null); - final Collection users = arangoDB.getUsers(); - assertThat(users).hasSize(initialUsers.size() + 1); + try { + final Collection users = arangoDB.getUsers(); + assertThat(users).hasSize(initialUsers.size() + 1); - final List expected = new ArrayList<>(users.size()); - // Add initial users, including root: - for (final UserEntity userEntity : initialUsers) { - expected.add(userEntity.getUser()); - } - // Add 
username: - expected.add(username); + final List expected = new ArrayList<>(users.size()); + // Add initial users, including root: + for (final UserEntity userEntity : initialUsers) { + expected.add(userEntity.getUser()); + } + // Add username: + expected.add(username); - for (final UserEntity user : users) { - assertThat(user.getUser()).isIn(expected); + for (final UserEntity user : users) { + assertThat(user.getUser()).isIn(expected); + } + } finally { + arangoDB.deleteUser(username); } } @@ -302,7 +310,11 @@ void getUsers(ArangoDB arangoDB) { void updateUserNoOptions(ArangoDB arangoDB) { String username = "user-" + UUID.randomUUID(); arangoDB.createUser(username, PW, null); - arangoDB.updateUser(username, null); + try { + arangoDB.updateUser(username, null); + } finally { + arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -312,16 +324,20 @@ void updateUser(ArangoDB arangoDB) { final Map extra = new HashMap<>(); extra.put("hund", false); arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)); - extra.put("hund", true); - extra.put("mund", true); - final UserEntity user = arangoDB.updateUser(username, new UserUpdateOptions().extra(extra)); - assertThat(user.getExtra()).hasSize(2); - assertThat(user.getExtra()).containsKey("hund"); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); - final UserEntity user2 = arangoDB.getUser(username); - assertThat(user2.getExtra()).hasSize(2); - assertThat(user2.getExtra()).containsKey("hund"); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + try { + extra.put("hund", true); + extra.put("mund", true); + final UserEntity user = arangoDB.updateUser(username, new UserUpdateOptions().extra(extra)); + assertThat(user.getExtra()).hasSize(2); + assertThat(user.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("hund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username); + 
assertThat(user2.getExtra()).hasSize(2); + assertThat(user2.getExtra()).containsKey("hund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("hund")))).isTrue(); + } finally { + arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -331,16 +347,20 @@ void replaceUser(ArangoDB arangoDB) { final Map extra = new HashMap<>(); extra.put("hund", false); arangoDB.createUser(username, PW, new UserCreateOptions().extra(extra)); - extra.remove("hund"); - extra.put("mund", true); - final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)); - assertThat(user.getExtra()).hasSize(1); - assertThat(user.getExtra()).containsKey("mund"); - assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); - final UserEntity user2 = arangoDB.getUser(username); - assertThat(user2.getExtra()).hasSize(1); - assertThat(user2.getExtra()).containsKey("mund"); - assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + try { + extra.remove("hund"); + extra.put("mund", true); + final UserEntity user = arangoDB.replaceUser(username, new UserUpdateOptions().extra(extra)); + assertThat(user.getExtra()).hasSize(1); + assertThat(user.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user.getExtra().get("mund")))).isTrue(); + final UserEntity user2 = arangoDB.getUser(username); + assertThat(user2.getExtra()).hasSize(1); + assertThat(user2.getExtra()).containsKey("mund"); + assertThat(Boolean.valueOf(String.valueOf(user2.getExtra().get("mund")))).isTrue(); + } finally { + arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -348,7 +368,11 @@ void replaceUser(ArangoDB arangoDB) { void updateUserDefaultDatabaseAccess(ArangoDB arangoDB) { String username = "user-" + UUID.randomUUID(); arangoDB.createUser(username, PW); - arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW); + try { + arangoDB.grantDefaultDatabaseAccess(username, Permissions.RW); + } finally { + 
arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -356,7 +380,11 @@ void updateUserDefaultDatabaseAccess(ArangoDB arangoDB) { void updateUserDefaultCollectionAccess(ArangoDB arangoDB) { String username = "user-" + UUID.randomUUID(); arangoDB.createUser(username, PW); - arangoDB.grantDefaultCollectionAccess(username, Permissions.RW); + try { + arangoDB.grantDefaultCollectionAccess(username, Permissions.RW); + } finally { + arangoDB.deleteUser(username); + } } @ParameterizedTest @@ -651,16 +679,6 @@ void arangoDBException(ArangoDB arangoDB) { assertThat(e.getErrorNum()).isEqualTo(1228); } - @ParameterizedTest - @MethodSource("arangos") - void fallbackHost() { - final ArangoDB arangoDB = new ArangoDB.Builder() - .loadProperties(config) - .host("not-accessible", 8529).host("172.28.0.1", 8529).build(); - final ArangoDBVersion version = arangoDB.getVersion(); - assertThat(version).isNotNull(); - } - @ParameterizedTest @MethodSource("arangos") void loadproperties() { @@ -670,16 +688,6 @@ void loadproperties() { assertThat(thrown).isInstanceOf(IllegalArgumentException.class); } - @ParameterizedTest - @MethodSource("arangos") - void loadpropertiesWithPrefix() { - ArangoDB adb = new ArangoDB.Builder() - .loadProperties(ConfigUtils.loadConfig("arangodb-with-prefix.properties", "adb")) - .build(); - adb.getVersion(); - adb.shutdown(); - } - @ParameterizedTest @MethodSource("arangos") void accessMultipleDatabases(ArangoDB arangoDB) { diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java index 35502380a..67af254ea 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseAsyncTest.java @@ -21,13 +21,10 @@ package com.arangodb; import com.arangodb.entity.*; -import com.arangodb.entity.AqlExecutionExplainEntity.ExecutionPlan; import 
com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; +import com.arangodb.internal.serde.InternalSerde; import com.arangodb.model.*; -import com.arangodb.util.MapBuilder; -import com.arangodb.util.RawBytes; -import com.arangodb.util.RawJson; -import com.arangodb.util.SlowTest; +import com.arangodb.util.*; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; @@ -42,6 +39,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.catchThrowable; +import static org.assertj.core.api.InstanceOfAssertFactories.*; +import static org.assertj.core.api.InstanceOfAssertFactories.DOUBLE; import static org.junit.jupiter.api.Assumptions.assumeTrue; @@ -476,31 +475,47 @@ void getCollectionsExcludeSystem(ArangoDatabaseAsync db) throws ExecutionExcepti void grantAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null).get(); - arangoDB.db(getTestDb()).grantAccess(user).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void grantAccessRW(ArangoDBAsync arangoDB) { + void grantAccessRW(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void grantAccessRO(ArangoDBAsync arangoDB) { + void grantAccessRO(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - 
arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @MethodSource("asyncArangos") - void grantAccessNONE(ArangoDBAsync arangoDB) { + void grantAccessNONE(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @@ -513,10 +528,14 @@ void grantAccessUserNotFound(ArangoDatabaseAsync db) { @ParameterizedTest @MethodSource("asyncArangos") - void revokeAccess(ArangoDBAsync arangoDB) { + void revokeAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).revokeAccess(user); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).revokeAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @@ -529,10 +548,14 @@ void revokeAccessUserNotFound(ArangoDatabaseAsync db) { @ParameterizedTest @MethodSource("asyncArangos") - void resetAccess(ArangoDBAsync arangoDB) { + void resetAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).resetAccess(user); + arangoDB.createUser(user, "1234", null).get(); + try { + arangoDB.db(getTestDb()).resetAccess(user).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @@ -545,10 +568,14 @@ void 
resetAccessUserNotFound(ArangoDatabaseAsync db) { @ParameterizedTest @MethodSource("asyncArangos") - void grantDefaultCollectionAccess(ArangoDBAsync arangoDB) { + void grantDefaultCollectionAccess(ArangoDBAsync arangoDB) throws ExecutionException, InterruptedException { String user = "user-" + rnd(); - arangoDB.createUser(user, "1234"); - arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW); + arangoDB.createUser(user, "1234").get(); + try { + arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW).get(); + } finally { + arangoDB.deleteUser(user).get(); + } } @ParameterizedTest @@ -666,6 +693,19 @@ void queryWithTTL(ArangoDatabaseAsync db) throws InterruptedException, Execution assertThat(ex.getMessage()).isEqualTo("Response: 404, Error: 1600 - cursor not found"); } + @ParameterizedTest + @MethodSource("asyncDbs") + void queryRawBytes(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + InternalSerde serde = db.getSerde(); + RawBytes doc = RawBytes.of(serde.serialize(Collections.singletonMap("value", 1))); + RawBytes res = db.query("RETURN @doc", RawBytes.class, Collections.singletonMap("doc", doc)).get() + .getResult().get(0); + JsonNode data = serde.deserialize(res.get(), JsonNode.class); + assertThat(data.isObject()).isTrue(); + assertThat(data.get("value").isNumber()).isTrue(); + assertThat(data.get("value").numberValue()).isEqualTo(1); + } + @ParameterizedTest @MethodSource("asyncDbs") void changeQueryCache(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { @@ -889,7 +929,7 @@ void queryWithBindVars(ArangoDatabaseAsync db) throws ExecutionException, Interr @ParameterizedTest @MethodSource("asyncDbs") void queryWithRawBindVars(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { - final Map bindVars = new HashMap<>(); + final Map bindVars = new HashMap<>(); bindVars.put("foo", RawJson.of("\"fooValue\"")); bindVars.put("bar", 
RawBytes.of(db.getSerde().serializeUserData(11))); @@ -1054,79 +1094,225 @@ void queryAllowRetryCloseSingleBatch(ArangoDBAsync arangoDB) throws ExecutionExc cursor.close().get(); } + private String getExplainQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + ArangoCollectionAsync character = db.collection("got_characters"); + ArangoCollectionAsync actor = db.collection("got_actors"); + + if (!character.exists().get()) + character.create().get(); + + if (!actor.exists().get()) + actor.create().get(); + + return "FOR `character` IN `got_characters` " + + " FOR `actor` IN `got_actors` " + + " FILTER `actor`.`_id` == @myId" + + " FILTER `character`.`actor` == `actor`.`_id` " + + " FILTER `character`.`value` != 1/0 " + + " RETURN {`character`, `actor`}"; + } + + void checkExecutionPlan(AqlExecutionExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.getEstimatedNrItems()) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.getEstimatedCost()).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.getName()) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.getName()) + .isNotNull() + .isNotEmpty(); + } + + @SuppressWarnings("deprecation") @ParameterizedTest @MethodSource("asyncDbs") void explainQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { - final AqlExecutionExplainEntity explain = db.explainQuery("for i in 1..1 return i", null, null).get(); + 
AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions()).get(); assertThat(explain).isNotNull(); - assertThat(explain.getPlan()).isNotNull(); + + checkExecutionPlan(explain.getPlan()); assertThat(explain.getPlans()).isNull(); - final ExecutionPlan plan = explain.getPlan(); - assertThat(plan.getCollections()).isEmpty(); - assertThat(plan.getEstimatedCost()).isPositive(); - assertThat(plan.getEstimatedNrItems()).isPositive(); - assertThat(plan.getVariables()).hasSize(2); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("asyncDbs") + void explainQueryAllPlans(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions().allPlans(true)).get(); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + 
void checkUntypedExecutionPlan(AqlQueryExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.get("estimatedNrItems")) + .isInstanceOf(Integer.class) + .asInstanceOf(INTEGER) + .isNotNull() + .isNotNegative(); assertThat(plan.getNodes()).isNotEmpty(); - if (isAtLeastVersion(3, 10)) { - assertThat(explain.getStats().getPeakMemoryUsage()).isNotNull(); - assertThat(explain.getStats().getExecutionTime()).isNotNull(); - } + + AqlQueryExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.get("estimatedCost")).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); } @ParameterizedTest @MethodSource("asyncDbs") - void explainQueryWithBindVars(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { - final AqlExecutionExplainEntity explain = db.explainQuery("for i in 1..1 return @value", - Collections.singletonMap("value", 11), null).get(); + void explainAqlQuery(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions()).get(); assertThat(explain).isNotNull(); - assertThat(explain.getPlan()).isNotNull(); + + checkUntypedExecutionPlan(explain.getPlan()); assertThat(explain.getPlans()).isNull(); - final ExecutionPlan plan = 
explain.getPlan(); - assertThat(plan.getCollections()).isEmpty(); - assertThat(plan.getEstimatedCost()).isPositive(); - assertThat(plan.getEstimatedNrItems()).isPositive(); - assertThat(plan.getVariables()).hasSize(3); - assertThat(plan.getNodes()).isNotEmpty(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); } @ParameterizedTest @MethodSource("asyncDbs") - void explainQueryWithWarnings(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { - AqlExecutionExplainEntity explain = db.explainQuery("return 1/0", null, null).get(); - assertThat(explain.getWarnings()) - .hasSize(1) - .allSatisfy(w -> { - assertThat(w.getCode()).isEqualTo(1562); - assertThat(w.getMessage()).isEqualTo("division by zero"); - }); + void explainAqlQueryAllPlans(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().allPlans(true)).get(); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) 
+ .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); } @ParameterizedTest @MethodSource("asyncDbs") - void explainQueryWithIndexNode(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { - ArangoCollectionAsync character = db.collection("got_characters"); - ArangoCollectionAsync actor = db.collection("got_actors"); + void explainAqlQueryAllPlansCustomOption(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().customOption("allPlans", true)).get(); + assertThat(explain).isNotNull(); - if (!character.exists().get()) - character.create().get(); + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); - if (!actor.exists().get()) - actor.create().get(); + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); - String query = "" + - "FOR `character` IN `got_characters` " + - " FOR `actor` IN `got_actors` " + - " FILTER `character`.`actor` == `actor`.`_id` " + - " RETURN `character`"; - - final ExecutionPlan plan = db.explainQuery(query, null, null).get().getPlan(); - plan.getNodes().stream() - .filter(it -> "IndexNode".equals(it.getType())) - .flatMap(it -> it.getIndexes().stream()) - .forEach(it -> { - assertThat(it.getType()).isEqualTo(IndexType.primary); - assertThat(it.getFields()).contains("_key"); - }); + 
assertThat(explain.getCacheable()).isNull(); } @ParameterizedTest diff --git a/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java index b44ed20bb..3763aacbf 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoDatabaseTest.java @@ -21,8 +21,8 @@ package com.arangodb; import com.arangodb.entity.*; -import com.arangodb.entity.AqlExecutionExplainEntity.ExecutionPlan; import com.arangodb.entity.QueryCachePropertiesEntity.CacheMode; +import com.arangodb.internal.serde.InternalSerde; import com.arangodb.model.*; import com.arangodb.util.*; import com.fasterxml.jackson.databind.JsonNode; @@ -45,6 +45,7 @@ import static org.assertj.core.api.Assertions.catchThrowable; import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.InstanceOfAssertFactories.*; /** @@ -489,7 +490,11 @@ void getCollectionsExcludeSystem(ArangoDatabase db) { void grantAccess(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user); + try { + arangoDB.db(getTestDb()).grantAccess(user); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -497,7 +502,11 @@ void grantAccess(ArangoDB arangoDB) { void grantAccessRW(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.RW); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -505,7 +514,11 @@ void grantAccessRW(ArangoDB arangoDB) { void grantAccessRO(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO); + try { + 
arangoDB.db(getTestDb()).grantAccess(user, Permissions.RO); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -513,7 +526,11 @@ void grantAccessRO(ArangoDB arangoDB) { void grantAccessNONE(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE); + try { + arangoDB.db(getTestDb()).grantAccess(user, Permissions.NONE); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -529,7 +546,11 @@ void grantAccessUserNotFound(ArangoDatabase db) { void revokeAccess(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).revokeAccess(user); + try { + arangoDB.db(getTestDb()).revokeAccess(user); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -545,7 +566,11 @@ void revokeAccessUserNotFound(ArangoDatabase db) { void resetAccess(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234", null); - arangoDB.db(getTestDb()).resetAccess(user); + try { + arangoDB.db(getTestDb()).resetAccess(user); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -561,7 +586,11 @@ void resetAccessUserNotFound(ArangoDatabase db) { void grantDefaultCollectionAccess(ArangoDB arangoDB) { String user = "user-" + rnd(); arangoDB.createUser(user, "1234"); - arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW); + try { + arangoDB.db(getTestDb()).grantDefaultCollectionAccess(user, Permissions.RW); + } finally { + arangoDB.deleteUser(user); + } } @ParameterizedTest @@ -642,6 +671,25 @@ void queryWithLimitAndFullCount(ArangoDatabase db) { assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10); } + @ParameterizedTest + @MethodSource("dbs") + void queryWithLimitAndFullCountAsCustomOption(ArangoDatabase db) { + for (int i = 0; i < 10; i++) { + db.collection(CNAME1).insertDocument(new BaseDocument(), null); + 
} + + final ArangoCursor cursor = db + .query("for i in " + CNAME1 + " Limit 5 return i._id", String.class, new AqlQueryOptions() + .customOption("fullCount", true)); + assertThat((Object) cursor).isNotNull(); + for (int i = 0; i < 5; i++, cursor.next()) { + assertThat((Iterator) cursor).hasNext(); + } + assertThat(cursor.getStats()).isNotNull(); + assertThat(cursor.getStats().getExecutionTime()).isPositive(); + assertThat((cursor.getStats().getFullCount())).isGreaterThanOrEqualTo(10); + } + @ParameterizedTest @MethodSource("dbs") void queryStats(ArangoDatabase db) { @@ -736,6 +784,51 @@ void queryWithTTL(ArangoDatabase db) throws InterruptedException { } } + @ParameterizedTest + @MethodSource("dbs") + void queryRawBytes(ArangoDatabase db) { + InternalSerde serde = db.getSerde(); + RawBytes doc = RawBytes.of(serde.serialize(Collections.singletonMap("value", 1))); + RawBytes res = db.query("RETURN @doc", RawBytes.class, Collections.singletonMap("doc", doc)).next(); + JsonNode data = serde.deserialize(res.get(), JsonNode.class); + assertThat(data.isObject()).isTrue(); + assertThat(data.get("value").isNumber()).isTrue(); + assertThat(data.get("value").numberValue()).isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserDataScalar(ArangoDatabase db) { + List docs = Arrays.asList("a", "b", "c"); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", String.class, + Collections.singletonMap("docs", docs), new AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).contains("a", "b", "c"); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserDataManaged(ArangoDatabase db) { + RawJson a = RawJson.of("\"foo\""); + RawJson b = RawJson.of("{\"key\":\"value\"}"); + RawJson c = RawJson.of("[1,null,true,\"bla\",{},[],\"\"]"); + RawJson docs = RawJson.of("[" + a.get() + "," + b.get() + "," + c.get() + "]"); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", RawJson.class, + Collections.singletonMap("docs", docs), new 
AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).containsExactly(a, b, c); + } + + @ParameterizedTest + @MethodSource("dbs") + void queryUserData(ArangoDatabase db) { + Object a = "foo"; + Object b = Collections.singletonMap("key", "value"); + Object c = Arrays.asList(1, null, true, "bla", Collections.emptyMap(), Collections.emptyList(), ""); + List docs = Arrays.asList(a, b, c); + ArangoCursor res = db.query("FOR d IN @docs RETURN d", Object.class, + Collections.singletonMap("docs", docs), new AqlQueryOptions().batchSize(1)); + assertThat((Iterable) res).containsExactly(a, b, c); + } + @ParameterizedTest @MethodSource("dbs") void changeQueryCache(ArangoDatabase db) { @@ -957,7 +1050,7 @@ void queryWithBindVars(ArangoDatabase db) { @ParameterizedTest @MethodSource("dbs") void queryWithRawBindVars(ArangoDatabase db) { - final Map bindVars = new HashMap<>(); + final Map bindVars = new HashMap<>(); bindVars.put("foo", RawJson.of("\"fooValue\"")); bindVars.put("bar", RawBytes.of(db.getSerde().serializeUserData(11))); @@ -1125,79 +1218,225 @@ void queryAllowRetryCloseSingleBatch(ArangoDB arangoDB) throws IOException { cursor.close(); } + private String getExplainQuery(ArangoDatabase db) { + ArangoCollection character = db.collection("got_characters"); + ArangoCollection actor = db.collection("got_actors"); + + if (!character.exists()) + character.create(); + + if (!actor.exists()) + actor.create(); + + return "FOR `character` IN `got_characters` " + + " FOR `actor` IN `got_actors` " + + " FILTER `actor`.`_id` == @myId" + + " FILTER `character`.`actor` == `actor`.`_id` " + + " FILTER `character`.`value` != 1/0 " + + " RETURN {`character`, `actor`}"; + } + + void checkExecutionPlan(AqlExecutionExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.getEstimatedNrItems()) + .isNotNull() + .isNotNegative(); + assertThat(plan.getNodes()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionNode node = 
plan.getNodes().iterator().next(); + assertThat(node.getEstimatedCost()).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.getName()) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlExecutionExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.getName()) + .isNotNull() + .isNotEmpty(); + } + + @SuppressWarnings("deprecation") @ParameterizedTest @MethodSource("dbs") void explainQuery(ArangoDatabase db) { - final AqlExecutionExplainEntity explain = db.explainQuery("for i in 1..1 return i", null, null); + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions()); assertThat(explain).isNotNull(); - assertThat(explain.getPlan()).isNotNull(); + + checkExecutionPlan(explain.getPlan()); assertThat(explain.getPlans()).isNull(); - final ExecutionPlan plan = explain.getPlan(); - assertThat(plan.getCollections()).isEmpty(); - assertThat(plan.getEstimatedCost()).isPositive(); - assertThat(plan.getEstimatedNrItems()).isPositive(); - assertThat(plan.getVariables()).hasSize(2); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); + } + + @SuppressWarnings("deprecation") + @ParameterizedTest + @MethodSource("dbs") + void explainQueryAllPlans(ArangoDatabase db) 
{ + AqlExecutionExplainEntity explain = db.explainQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new AqlQueryExplainOptions().allPlans(true)); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().getExecutionTime()) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); + } + + void checkUntypedExecutionPlan(AqlQueryExplainEntity.ExecutionPlan plan) { + assertThat(plan).isNotNull(); + assertThat(plan.get("estimatedNrItems")) + .isInstanceOf(Integer.class) + .asInstanceOf(INTEGER) + .isNotNull() + .isNotNegative(); assertThat(plan.getNodes()).isNotEmpty(); - if (isAtLeastVersion(3, 10)) { - assertThat(explain.getStats().getPeakMemoryUsage()).isNotNull(); - assertThat(explain.getStats().getExecutionTime()).isNotNull(); - } + + AqlQueryExplainEntity.ExecutionNode node = plan.getNodes().iterator().next(); + assertThat(node.get("estimatedCost")).isNotNull(); + + assertThat(plan.getEstimatedCost()).isNotNull().isNotNegative(); + assertThat(plan.getCollections()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionCollection collection = plan.getCollections().iterator().next(); + assertThat(collection.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + .isNotNull() + .isNotEmpty(); + + assertThat(plan.getRules()).isNotEmpty(); + assertThat(plan.getVariables()).isNotEmpty(); + + AqlQueryExplainEntity.ExecutionVariable variable = plan.getVariables().iterator().next(); + assertThat(variable.get("name")) + .isInstanceOf(String.class) + .asInstanceOf(STRING) + 
.isNotNull() + .isNotEmpty(); } @ParameterizedTest @MethodSource("dbs") - void explainQueryWithBindVars(ArangoDatabase db) { - final AqlExecutionExplainEntity explain = db.explainQuery("for i in 1..1 return @value", - Collections.singletonMap("value", 11), null); + void explainAqlQuery(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions()); assertThat(explain).isNotNull(); - assertThat(explain.getPlan()).isNotNull(); + + checkUntypedExecutionPlan(explain.getPlan()); assertThat(explain.getPlans()).isNull(); - final ExecutionPlan plan = explain.getPlan(); - assertThat(plan.getCollections()).isEmpty(); - assertThat(plan.getEstimatedCost()).isPositive(); - assertThat(plan.getEstimatedNrItems()).isPositive(); - assertThat(plan.getVariables()).hasSize(3); - assertThat(plan.getNodes()).isNotEmpty(); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isFalse(); } @ParameterizedTest @MethodSource("dbs") - void explainQueryWithWarnings(ArangoDatabase db) { - AqlExecutionExplainEntity explain = db.explainQuery("return 1/0", null, null); - assertThat(explain.getWarnings()) - .hasSize(1) - .allSatisfy(w -> { - assertThat(w.getCode()).isEqualTo(1562); - assertThat(w.getMessage()).isEqualTo("division by zero"); - }); + void explainAqlQueryAllPlans(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new 
ExplainAqlQueryOptions().allPlans(true)); + assertThat(explain).isNotNull(); + + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); + + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); + + assertThat(explain.getCacheable()).isNull(); } @ParameterizedTest @MethodSource("dbs") - void explainQueryWithIndexNode(ArangoDatabase db) { - ArangoCollection character = db.collection("got_characters"); - ArangoCollection actor = db.collection("got_actors"); + void explainAqlQueryAllPlansCustomOption(ArangoDatabase db) { + AqlQueryExplainEntity explain = db.explainAqlQuery( + getExplainQuery(db), + Collections.singletonMap("myId", "123"), + new ExplainAqlQueryOptions().customOption("allPlans", true)); + assertThat(explain).isNotNull(); - if (!character.exists()) - character.create(); + assertThat(explain.getPlan()).isNull(); + assertThat(explain.getPlans()).allSatisfy(this::checkUntypedExecutionPlan); + assertThat(explain.getWarnings()).isNotEmpty(); - if (!actor.exists()) - actor.create(); + CursorWarning warning = explain.getWarnings().iterator().next(); + assertThat(warning).isNotNull(); + assertThat(warning.getCode()).isEqualTo(1562); + assertThat(warning.getMessage()).contains("division by zero"); + + assertThat(explain.getStats()).isNotNull(); + + assertThat(explain.getStats().get("executionTime")) + .isInstanceOf(Double.class) + .asInstanceOf(DOUBLE) + .isNotNull() + .isPositive(); - String query = "" + - "FOR `character` IN `got_characters` " + - " FOR `actor` IN `got_actors` " + - " FILTER 
`character`.`actor` == `actor`.`_id` " + - " RETURN `character`"; - - final ExecutionPlan plan = db.explainQuery(query, null, null).getPlan(); - plan.getNodes().stream() - .filter(it -> "IndexNode".equals(it.getType())) - .flatMap(it -> it.getIndexes().stream()) - .forEach(it -> { - assertThat(it.getType()).isEqualTo(IndexType.primary); - assertThat(it.getFields()).contains("_key"); - }); + assertThat(explain.getCacheable()).isNull(); } @ParameterizedTest diff --git a/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java index 27cf4e31e..400c497ca 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoSearchAsyncTest.java @@ -28,6 +28,7 @@ import com.arangodb.model.InvertedIndexOptions; import com.arangodb.model.arangosearch.*; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.DisabledIfSystemProperty; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -740,7 +741,7 @@ void arangoSearchOptions(ArangoDatabaseAsync db) throws ExecutionException, Inte } if (isEnterprise() && isAtLeastVersion(3, 12)) { - assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); + assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); } } @@ -980,6 +981,7 @@ void collationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, Interr } + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") @ParameterizedTest @MethodSource("asyncDbs") void classificationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { @@ -1004,6 +1006,7 @@ void classificationAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, I createGetAndDeleteTypedAnalyzer(db, analyzer); } + 
@DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") @ParameterizedTest @MethodSource("asyncDbs") void nearestNeighborsAnalyzer(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { diff --git a/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java b/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java index cc0412bbf..e29a6907e 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoSearchTest.java @@ -28,6 +28,7 @@ import com.arangodb.model.InvertedIndexOptions; import com.arangodb.model.arangosearch.*; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.DisabledIfSystemProperty; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -154,7 +155,7 @@ void createArangoSearchViewWithPrimarySort(ArangoDatabase db) { final ArangoSearchCreateOptions options = new ArangoSearchCreateOptions(); final PrimarySort primarySort = PrimarySort.on("myFieldName"); - primarySort.ascending(true); + primarySort.ascending(false); options.primarySort(primarySort); options.primarySortCompression(ArangoSearchCompression.none); options.consolidationIntervalMsec(666666L); @@ -179,6 +180,13 @@ void createArangoSearchViewWithPrimarySort(ArangoDatabase db) { assertThat(retrievedStoredValue).isNotNull(); assertThat(retrievedStoredValue.getFields()).isEqualTo(storedValue.getFields()); assertThat(retrievedStoredValue.getCompression()).isEqualTo(storedValue.getCompression()); + assertThat(properties.getPrimarySort()) + .hasSize(1) + .allSatisfy(ps -> { + assertThat(ps).isNotNull(); + assertThat(ps.getField()).isEqualTo(primarySort.getField()); + assertThat(ps.getAscending()).isEqualTo(primarySort.getAscending()); + }); } } @@ -739,7 +747,7 @@ void arangoSearchOptions(ArangoDatabase 
db) { } if (isEnterprise() && isAtLeastVersion(3, 12)) { - assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); + assertThat(properties.getOptimizeTopK()).containsExactly(optimizeTopK); } } @@ -979,6 +987,7 @@ void collationAnalyzer(ArangoDatabase db) { } + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") @ParameterizedTest @MethodSource("dbs") void classificationAnalyzer(ArangoDatabase db) { @@ -1003,6 +1012,7 @@ void classificationAnalyzer(ArangoDatabase db) { createGetAndDeleteTypedAnalyzer(db, analyzer); } + @DisabledIfSystemProperty(named = "skipStatefulTests", matches = "^(|true|1)$", disabledReason = "Test requires server with analyzer model located at `/tmp/foo.bin`") @ParameterizedTest @MethodSource("dbs") void nearestNeighborsAnalyzer(ArangoDatabase db) { diff --git a/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java b/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java index 362d07447..a1cd046f0 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoViewAsyncTest.java @@ -22,6 +22,7 @@ import com.arangodb.entity.ViewEntity; import com.arangodb.entity.ViewType; +import com.arangodb.util.TestUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -49,8 +50,13 @@ static void init() { @MethodSource("asyncDbs") void create(ArangoDatabaseAsync db) throws ExecutionException, InterruptedException { String name = rndName(); - db.createView(name, ViewType.ARANGO_SEARCH).get(); - assertThat(db.view(name).exists().get()).isTrue(); + try { + db.createView(name, ViewType.ARANGO_SEARCH).get(); + assertThat(db.view(name).exists().get()).isTrue(); + } catch (Exception e) { + System.err.println("Got exception with name: " + 
TestUtils.unicodeEscape(name)); + throw e; + } } @ParameterizedTest diff --git a/test-functional/src/test/java/com/arangodb/ArangoViewTest.java b/test-functional/src/test/java/com/arangodb/ArangoViewTest.java index 6a2d67c2d..67482161b 100644 --- a/test-functional/src/test/java/com/arangodb/ArangoViewTest.java +++ b/test-functional/src/test/java/com/arangodb/ArangoViewTest.java @@ -22,6 +22,7 @@ import com.arangodb.entity.ViewEntity; import com.arangodb.entity.ViewType; +import com.arangodb.util.TestUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -48,8 +49,13 @@ static void init() { @MethodSource("dbs") void create(ArangoDatabase db) { String name = rndName(); - db.createView(name, ViewType.ARANGO_SEARCH); - assertThat(db.view(name).exists()).isTrue(); + try { + db.createView(name, ViewType.ARANGO_SEARCH); + assertThat(db.view(name).exists()).isTrue(); + } catch (Exception e) { + System.err.println("Got exception with name: " + TestUtils.unicodeEscape(name)); + throw e; + } } @ParameterizedTest diff --git a/test-functional/src/test/java/com/arangodb/BaseJunit5.java b/test-functional/src/test/java/com/arangodb/BaseJunit5.java index c6759f07b..d5c491361 100644 --- a/test-functional/src/test/java/com/arangodb/BaseJunit5.java +++ b/test-functional/src/test/java/com/arangodb/BaseJunit5.java @@ -101,7 +101,7 @@ static ArangoDatabase initDB(String name) { return database; } - static ArangoDatabase initDB() { + protected static ArangoDatabase initDB() { return initDB(TEST_DB); } diff --git a/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java b/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java index a1152c402..ea82b52d1 100644 --- a/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java +++ b/test-functional/src/test/java/com/arangodb/JacksonRequestContextTest.java @@ -44,7 +44,7 @@ import static 
org.assertj.core.api.Assertions.assertThat; /** - * @author Michele Rastelli + * NB: excluded from shaded tests */ class JacksonRequestContextTest { diff --git a/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java b/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java index 895dc8687..f4c63d7d0 100644 --- a/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/JwtAuthAsyncTest.java @@ -1,5 +1,6 @@ package com.arangodb; +import com.arangodb.config.ArangoConfigProperties; import com.arangodb.config.ConfigUtils; import com.arangodb.internal.ArangoRequestParam; import org.junit.jupiter.api.BeforeAll; @@ -32,9 +33,10 @@ static void init() { } private static String getJwt(ArangoDB arangoDB) { + ArangoConfigProperties conf = ConfigUtils.loadConfig(); Map reqBody = new HashMap<>(); - reqBody.put("username", "root"); - reqBody.put("password", "test"); + reqBody.put("username", conf.getUser().orElse("root")); + reqBody.put("password", conf.getPassword().orElse(null)); Request req = Request.builder() .db(ArangoRequestParam.SYSTEM) diff --git a/test-functional/src/test/java/com/arangodb/JwtAuthTest.java b/test-functional/src/test/java/com/arangodb/JwtAuthTest.java index 1f9185430..b743db4a1 100644 --- a/test-functional/src/test/java/com/arangodb/JwtAuthTest.java +++ b/test-functional/src/test/java/com/arangodb/JwtAuthTest.java @@ -1,5 +1,6 @@ package com.arangodb; +import com.arangodb.config.ArangoConfigProperties; import com.arangodb.config.ConfigUtils; import com.arangodb.internal.ArangoRequestParam; import org.junit.jupiter.api.BeforeAll; @@ -31,9 +32,10 @@ static void init() { } private static String getJwt(ArangoDB arangoDB) { + ArangoConfigProperties conf = ConfigUtils.loadConfig(); Map reqBody = new HashMap<>(); - reqBody.put("username", "root"); - reqBody.put("password", "test"); + reqBody.put("username", conf.getUser().orElse("root")); + reqBody.put("password", 
conf.getPassword().orElse(null)); Request req = Request.builder() .db(ArangoRequestParam.SYSTEM) diff --git a/test-functional/src/test/java/com/arangodb/RequestContextTest.java b/test-functional/src/test/java/com/arangodb/RequestContextTest.java index 730dea14d..f76a0fd3d 100644 --- a/test-functional/src/test/java/com/arangodb/RequestContextTest.java +++ b/test-functional/src/test/java/com/arangodb/RequestContextTest.java @@ -43,7 +43,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** - * @author Michele Rastelli + * NB: excluded from shaded tests */ class RequestContextTest { diff --git a/test-functional/src/test/java/com/arangodb/SerializableTest.java b/test-functional/src/test/java/com/arangodb/SerializableTest.java index 17b1ec1da..a915a74aa 100644 --- a/test-functional/src/test/java/com/arangodb/SerializableTest.java +++ b/test-functional/src/test/java/com/arangodb/SerializableTest.java @@ -1,5 +1,7 @@ package com.arangodb; +import com.arangodb.entity.BaseDocument; +import com.arangodb.entity.BaseEdgeDocument; import com.arangodb.entity.ErrorEntity; import com.arangodb.internal.net.ArangoDBRedirectException; import com.fasterxml.jackson.databind.JsonNode; @@ -50,6 +52,30 @@ void serializeArangoDBMultipleException() throws IOException, ClassNotFoundExcep assertThat(e2.getExceptions().iterator().next().getMessage()).isEqualTo("foo"); } + @Test + void serializeBaseDocument() throws IOException, ClassNotFoundException { + BaseDocument doc = new BaseDocument(); + doc.setKey("test"); + doc.setId("id"); + doc.setRevision("revision"); + doc.addAttribute("foo", "bar"); + BaseDocument doc2 = roundTrip(doc); + assertThat(doc2).isEqualTo(doc); + } + + @Test + void serializeBaseEdgeDocument() throws IOException, ClassNotFoundException { + BaseEdgeDocument doc = new BaseEdgeDocument(); + doc.setKey("test"); + doc.setId("id"); + doc.setRevision("revision"); + doc.setFrom("from"); + doc.setTo("to"); + doc.addAttribute("foo", "bar"); + BaseDocument doc2 = 
roundTrip(doc); + assertThat(doc2).isEqualTo(doc); + } + private T roundTrip(T input) throws IOException, ClassNotFoundException { ByteArrayOutputStream os = new ByteArrayOutputStream(); ObjectOutputStream objectOutputStream = new ObjectOutputStream(os); diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java index 5c28f8b40..2ad090146 100644 --- a/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionAsyncTest.java @@ -686,7 +686,7 @@ void createCursor(ArangoDatabaseAsync db) throws ExecutionException, Interrupted DocumentCreateEntity externalDoc = collection .insertDocument(new BaseDocument(), null).get(); - final Map bindVars = new HashMap<>(); + final Map bindVars = new HashMap<>(); bindVars.put("@collection", COLLECTION_NAME); bindVars.put("key", externalDoc.getKey()); diff --git a/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java b/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java index dff39ef83..826a6696f 100644 --- a/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java +++ b/test-functional/src/test/java/com/arangodb/StreamTransactionTest.java @@ -691,7 +691,7 @@ void createCursor(ArangoDatabase db) { DocumentCreateEntity externalDoc = collection .insertDocument(new BaseDocument(), null); - final Map bindVars = new HashMap<>(); + final Map bindVars = new HashMap<>(); bindVars.put("@collection", COLLECTION_NAME); bindVars.put("key", externalDoc.getKey()); diff --git a/test-functional/src/test/java/com/arangodb/UserAgentTest.java b/test-functional/src/test/java/com/arangodb/UserAgentTest.java index 63e2ea555..e0c35ef6b 100644 --- a/test-functional/src/test/java/com/arangodb/UserAgentTest.java +++ b/test-functional/src/test/java/com/arangodb/UserAgentTest.java @@ -10,7 +10,7 @@ class UserAgentTest extends 
BaseJunit5 { - private static final String EXPECTED_VERSION = "7.10.0"; + private static final String EXPECTED_VERSION = "7.23.0"; private static final boolean SHADED = Boolean.parseBoolean(System.getProperty("shaded")); diff --git a/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java b/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java index 0b7dca677..dcef7a620 100644 --- a/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java +++ b/test-functional/src/test/java/com/arangodb/config/ConfigUtils.java @@ -1,5 +1,7 @@ package com.arangodb.config; +import java.util.Properties; + public class ConfigUtils { public static ArangoConfigProperties loadConfig() { @@ -14,4 +16,8 @@ public static ArangoConfigProperties loadConfig(final String location, final Str return ArangoConfigProperties.fromFile(location, prefix); } + public static ArangoConfigProperties loadConfig(final Properties properties, final String prefix) { + return ArangoConfigProperties.fromProperties(properties, prefix); + } + } diff --git a/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java b/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java index 674c14851..109a9eb5e 100644 --- a/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java +++ b/test-functional/src/test/java/com/arangodb/internal/HostHandlerTest.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.List; +import java.util.concurrent.CompletableFuture; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.fail; @@ -39,15 +40,20 @@ class HostHandlerTest { private static final ConnectionPool mockCP = new ConnectionPool() { @Override - public Connection createConnection(HostDescription host) { + public Connection createConnection() { return null; } @Override - public Connection connection() { + public CompletableFuture connection() { return null; } + @Override + public void 
release(Connection connection) { + + } + @Override public void setJwt(String jwt) { diff --git a/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java b/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java index 39e9c1c43..c779464a1 100644 --- a/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java +++ b/test-functional/src/test/java/com/arangodb/model/AqlQueryOptionsTest.java @@ -15,11 +15,13 @@ void cloneable() { AqlQueryOptions options = new AqlQueryOptions() .cache(true) .stream(true) + .usePlanCache(true) .rules(rules) .shardIds("a", "b"); AqlQueryOptions clone = options.clone(); assertThat(clone.getCache()).isEqualTo(options.getCache()); assertThat(clone.getStream()).isEqualTo(options.getStream()); + assertThat(clone.getUsePlanCache()).isEqualTo(options.getUsePlanCache()); assertThat(clone.getRules()) .isEqualTo(options.getRules()) .isNotSameAs(options.getRules()); diff --git a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java index 1b5fb9d54..0f305e97c 100644 --- a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java +++ b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeAsyncTest.java @@ -50,7 +50,7 @@ /** - * @author Michele Rastelli + * NB: excluded from shaded tests */ class CustomSerdeAsyncTest { diff --git a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java index bfdc37bd5..58a736f6f 100644 --- a/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java +++ b/test-functional/src/test/java/com/arangodb/serde/CustomSerdeTest.java @@ -49,7 +49,7 @@ /** - * @author Michele Rastelli + * NB: excluded from shaded tests */ class CustomSerdeTest { diff --git a/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java 
b/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java index 13f925fc9..d5a3a969c 100644 --- a/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java +++ b/test-functional/src/test/java/com/arangodb/serde/JacksonInterferenceTest.java @@ -16,6 +16,9 @@ import static org.assertj.core.api.Assertions.assertThat; +/** + * NB: excluded from shaded tests + */ class JacksonInterferenceTest { private final ObjectMapper mapper = new ObjectMapper(); diff --git a/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java b/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java new file mode 100644 index 000000000..7ce63baac --- /dev/null +++ b/test-functional/src/test/java/com/arangodb/serde/JsonBTypesTest.java @@ -0,0 +1,40 @@ +package com.arangodb.serde; + +import com.arangodb.ArangoDatabase; +import com.arangodb.BaseJunit5; +import jakarta.json.Json; +import jakarta.json.JsonObject; +import jakarta.json.JsonString; +import jakarta.json.JsonValue; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collections; + +import static org.assertj.core.api.Assertions.assertThat; + +public class JsonBTypesTest extends BaseJunit5 { + + @BeforeAll + static void init() { + BaseJunit5.initDB(); + } + + @ParameterizedTest + @MethodSource("dbs") + void jsonNode(ArangoDatabase db) { + JsonObject doc = Json.createObjectBuilder() + .add("foo", "bar") + .build(); + JsonObject res = db.query("return @d", JsonObject.class, Collections.singletonMap("d", doc)).next(); + assertThat(res.size()).isEqualTo(1); + assertThat(res.getString("foo")).isEqualTo("bar"); + JsonValue value = db.query("return @d.foo", JsonValue.class, Collections.singletonMap("d", doc)).next(); + assertThat(value) + .isInstanceOf(JsonString.class) + .extracting(v -> ((JsonString) v).getString()) + .isEqualTo("bar"); + } + +} diff --git 
a/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java b/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java index 8a02d6422..fd98e5e37 100644 --- a/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java +++ b/test-functional/src/test/java/com/arangodb/serde/SerdeTest.java @@ -37,8 +37,8 @@ void rawBytesSerde(ContentType type) { InternalSerde s = new InternalSerdeProvider(type).create(); ObjectNode node = JsonNodeFactory.instance.objectNode().put("foo", "bar"); RawBytes raw = RawBytes.of(s.serialize(node)); - byte[] serialized = s.serialize(raw); - RawBytes deserialized = s.deserialize(serialized, RawBytes.class); + byte[] serialized = s.serializeUserData(raw); + RawBytes deserialized = s.deserializeUserData(serialized, RawBytes.class); assertThat(deserialized).isEqualTo(raw); } diff --git a/test-functional/src/test/java/com/arangodb/util/TestUtils.java b/test-functional/src/test/java/com/arangodb/util/TestUtils.java index 978fe29ee..7ee98b918 100644 --- a/test-functional/src/test/java/com/arangodb/util/TestUtils.java +++ b/test-functional/src/test/java/com/arangodb/util/TestUtils.java @@ -107,4 +107,20 @@ public static String generateRandomName(boolean extendedNames, int length) { } } + public static String unicodeEscape(String s) { + StringBuilder sb = new StringBuilder(); + s.codePoints().forEach(cp -> { + if (cp <= 0xFFFF) { + sb.append(String.format("\\u%04X", cp)); + } else { + // Convert supplementary characters to surrogate pairs + char[] surrogates = Character.toChars(cp); + for (char c : surrogates) { + sb.append(String.format("\\u%04X", (int) c)); + } + } + }); + return sb.toString(); + } + } diff --git a/test-functional/src/test/resources/META-INF/native-image/resource-config.json b/test-functional/src/test/resources/META-INF/native-image/resource-config.json index a6eea307e..9d96e052d 100644 --- a/test-functional/src/test/resources/META-INF/native-image/resource-config.json +++ 
b/test-functional/src/test/resources/META-INF/native-image/resource-config.json @@ -5,13 +5,13 @@ "pattern": "\\Qarangodb.properties\\E" }, { - "pattern": "\\Qarangodb-bad.properties\\E" + "pattern": "\\Qarangodb-ssl.properties\\E" }, { - "pattern": "\\Qarangodb-bad2.properties\\E" + "pattern": "\\Qarangodb-bad.properties\\E" }, { - "pattern":"\\Qarangodb-with-prefix.properties\\E" + "pattern": "\\Qarangodb-bad2.properties\\E" }, { "pattern": "\\Qlogback-test.xml\\E" diff --git a/test-functional/src/test/resources/allure.properties b/test-functional/src/test/resources/allure.properties new file mode 100644 index 000000000..80b02dde9 --- /dev/null +++ b/test-functional/src/test/resources/allure.properties @@ -0,0 +1 @@ +allure.results.directory=target/allure-results diff --git a/test-functional/src/test/resources/arangodb-ssl.properties b/test-functional/src/test/resources/arangodb-ssl.properties new file mode 100644 index 000000000..eb0c74f48 --- /dev/null +++ b/test-functional/src/test/resources/arangodb-ssl.properties @@ -0,0 +1,7 @@ +arangodb.hosts=172.28.0.1:8529 +arangodb.password=test +arangodb.useSsl=true 
+arangodb.sslCertValue=MIIDezCCAmOgAwIBAgIEeDCzXzANBgkqhkiG9w0BAQsFADBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMjAxMTAxMTg1MTE5WhcNMzAxMDMwMTg1MTE5WjBuMRAwDgYDVQQGEwdVbmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYDVQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1WiDnd4+uCmMG539ZNZB8NwI0RZF3sUSQGPx3lkqaFTZVEzMZL76HYvdc9Qg7difyKyQ09RLSpMALX9euSseD7bZGnfQH52BnKcT09eQ3wh7aVQ5sN2omygdHLC7X9usntxAfv7NzmvdogNXoJQyY/hSZff7RIqWH8NnAUKkjqOe6Bf5LDbxHKESmrFBxOCOnhcpvZWetwpiRdJVPwUn5P82CAZzfiBfmBZnB7D0l+/6Cv4jMuH26uAIcixnVekBQzl1RgwczuiZf2MGO64vDMMJJWE9ClZF1uQuQrwXF6qwhuP1Hnkii6wNbTtPWlGSkqeutr004+Hzbf8KnRY4PAgMBAAGjITAfMB0GA1UdDgQWBBTBrv9Awynt3C5IbaCNyOW5v4DNkTANBgkqhkiG9w0BAQsFAAOCAQEAIm9rPvDkYpmzpSIhR3VXG9Y71gxRDrqkEeLsMoEyqGnw/zx1bDCNeGg2PncLlW6zTIipEBooixIE9U7KxHgZxBy0Et6EEWvIUmnr6F4F+dbTD050GHlcZ7eOeqYTPYeQC502G1Fo4tdNi4lDP9L9XZpf7Q1QimRH2qaLS03ZFZa2tY7ah/RQqZL8Dkxx8/zc25sgTHVpxoK853glBVBs/ENMiyGJWmAXQayewY3EPt/9wGwV4KmU3dPDleQeXSUGPUISeQxFjy+jCw21pYviWVJTNBA9l5ny3GhEmcnOT/gQHCvVRLyGLMbaMZ4JrPwb+aAtBgrgeiK4xeSMMvrbhw== +arangodb.sslAlgorithm=SunX509 +arangodb.sslProtocol=TLS +arangodb.verifyHost=false diff --git a/test-functional/src/test/resources/simplelogger.properties b/test-functional/src/test/resources/simplelogger.properties index 7649bd6c7..a2a4ce6d5 100644 --- a/test-functional/src/test/resources/simplelogger.properties +++ b/test-functional/src/test/resources/simplelogger.properties @@ -9,3 +9,6 @@ org.slf4j.simpleLogger.showShortLogName=false org.slf4j.simpleLogger.defaultLogLevel=info #org.slf4j.simpleLogger.log.com.arangodb.internal.serde.JacksonUtils=debug #org.slf4j.simpleLogger.log.com.arangodb.internal.net.Communication=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.serde.InternalSerdeImpl=debug 
+#org.slf4j.simpleLogger.log.io.netty.handler.logging.LoggingHandler=debug +#org.slf4j.simpleLogger.log.io.netty.handler.codec.http2.Http2FrameLogger=debug diff --git a/test-non-functional/pom.xml b/test-non-functional/pom.xml index 700d6af77..c5019cce5 100644 --- a/test-non-functional/pom.xml +++ b/test-non-functional/pom.xml @@ -8,7 +8,7 @@ ../test-parent com.arangodb test-parent - 7.10.0 + 7.23.0 test-non-functional @@ -17,7 +17,6 @@ 17 17 17 - true @@ -59,7 +58,7 @@ io.smallrye.config smallrye-config-core - 2.13.3 + 3.13.1 test diff --git a/test-non-functional/src/test/java/ConfigurationTest.java b/test-non-functional/src/test/java/ConfigurationTest.java new file mode 100644 index 000000000..b004c9b38 --- /dev/null +++ b/test-non-functional/src/test/java/ConfigurationTest.java @@ -0,0 +1,49 @@ +import com.arangodb.ArangoDB; +import com.arangodb.ContentType; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.entity.ArangoDBVersion; +import com.arangodb.serde.jackson.JacksonSerde; +import org.junit.jupiter.api.Test; + +import java.util.Properties; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ConfigurationTest { + + @Test + void fallbackHost() { + final ArangoDB arangoDB = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .serde(JacksonSerde.of(ContentType.JSON)) + .host("not-accessible", 8529) + .host("172.28.0.1", 8529) + .build(); + final ArangoDBVersion version = arangoDB.getVersion(); + assertThat(version).isNotNull(); + } + + @Test + void loadPropertiesWithPrefix() { + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile("arangodb-with-prefix.properties", "adb")) + .serde(JacksonSerde.of(ContentType.JSON)) + .build(); + adb.getVersion(); + adb.shutdown(); + } + + @Test + void loadConfigFromPropertiesWithPrefix() { + Properties props = new Properties(); + props.setProperty("adb.hosts", "172.28.0.1:8529"); + props.setProperty("adb.password", 
"test"); + ArangoDB adb = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromProperties(props, "adb")) + .serde(JacksonSerde.of(ContentType.JSON)) + .build(); + adb.getVersion(); + adb.shutdown(); + } + +} diff --git a/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java b/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java new file mode 100644 index 000000000..a3f5200a2 --- /dev/null +++ b/test-non-functional/src/test/java/concurrency/ConnectionLoadBalanceTest.java @@ -0,0 +1,113 @@ +package concurrency; + +import com.arangodb.*; +import com.arangodb.config.ArangoConfigProperties; +import com.arangodb.internal.net.ConnectionPoolImpl; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import util.TestUtils; + +import java.time.Duration; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.awaitility.Awaitility.await; + +public class ConnectionLoadBalanceTest { + private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionLoadBalanceTest.class); + + public static Stream configs() { + return Stream.of( + // FIXME: DE-1017 + // new Config(Protocol.VST, 1), + // new Config(Protocol.VST, 2), + new Config(Protocol.HTTP_JSON, 10), + new Config(Protocol.HTTP_JSON, 20), + new Config(Protocol.HTTP2_JSON, 1), + new Config(Protocol.HTTP2_JSON, 2) + ).map(Arguments::of); + } + + // Test the requests load balancing across different connections, when all the slots except 1 are busy + @MethodSource("configs") + @ParameterizedTest + void loadBalanceToAvailableSlots(Config cfg) throws InterruptedException { + doTestLoadBalance(cfg, 1); + } + + // Test the requests load balancing across different connections, when all the slots are 
busy + @MethodSource("configs") + @ParameterizedTest + void loadBalanceAllBusy(Config cfg) throws InterruptedException { + doTestLoadBalance(cfg, 2); + } + + void doTestLoadBalance(Config cfg, int sleepCycles) throws InterruptedException { + int longTasksCount = cfg.maxStreams() * cfg.maxConnections * sleepCycles - 1; + int shortTasksCount = 10; + long sleepDuration = 2; + + ArangoDatabaseAsync db = new ArangoDB.Builder() + .loadProperties(ArangoConfigProperties.fromFile()) + .protocol(cfg.protocol) + .serde(TestUtils.createSerde(cfg.protocol)) + .maxConnections(cfg.maxConnections) + .build().async().db(); + + LOGGER.debug("starting..."); + + CompletableFuture longRunningTasks = CompletableFuture.allOf( + IntStream.range(0, longTasksCount) + .mapToObj(__ -> + db.query("RETURN SLEEP(@duration)", Void.class, Map.of("duration", sleepDuration))) + .toArray(CompletableFuture[]::new) + ); + + Thread.sleep(100); + + CompletableFuture shortRunningTasks = CompletableFuture.allOf( + IntStream.range(0, shortTasksCount) + .mapToObj(__ -> db.getVersion()) + .toArray(CompletableFuture[]::new) + ); + + LOGGER.debug("awaiting..."); + + await() + .timeout(Duration.ofSeconds(sleepDuration * sleepCycles - 1L)) + .until(shortRunningTasks::isDone); + + LOGGER.debug("completed shortRunningTasks"); + + // join exceptional completions + shortRunningTasks.join(); + + await() + .timeout(Duration.ofSeconds(sleepDuration * sleepCycles + 2L)) + .until(longRunningTasks::isDone); + + LOGGER.debug("completed longRunningTasks"); + + // join exceptional completions + longRunningTasks.join(); + + db.arango().shutdown(); + } + + private record Config( + Protocol protocol, + int maxConnections + ) { + int maxStreams() { + return switch (protocol) { + case HTTP_JSON, HTTP_VPACK -> ConnectionPoolImpl.HTTP1_SLOTS; + default -> ConnectionPoolImpl.HTTP2_SLOTS; + }; + } + } +} diff --git a/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java 
b/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java index 618f229f3..bf9641e0c 100644 --- a/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java +++ b/test-non-functional/src/test/java/concurrency/ConnectionPoolConcurrencyTest.java @@ -4,10 +4,7 @@ import com.arangodb.internal.InternalRequest; import com.arangodb.internal.InternalResponse; import com.arangodb.internal.config.ArangoConfig; -import com.arangodb.internal.net.Connection; -import com.arangodb.internal.net.ConnectionFactory; -import com.arangodb.internal.net.ConnectionPool; -import com.arangodb.internal.net.ConnectionPoolImpl; +import com.arangodb.internal.net.*; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -23,7 +20,7 @@ public class ConnectionPoolConcurrencyTest { cfg.setMaxConnections(10_000); } - private final ConnectionFactory cf = (config, host) -> new Connection() { + private final ConnectionFactory cf = (config, host, pool) -> new Connection() { @Override public void setJwt(String jwt) { } @@ -33,6 +30,10 @@ public CompletableFuture executeAsync(InternalRequest request) throw new UnsupportedOperationException(); } + @Override + public void release() { + } + @Override public void close() { } @@ -45,7 +46,7 @@ void foo() throws InterruptedException, ExecutionException, IOException { List> futures = es.invokeAll(Collections.nCopies(8, (Callable) () -> { for (int i = 0; i < 10_000; i++) { - cp.createConnection(HostDescription.parse("127.0.0.1:8529")); + cp.createConnection(); cp.connection(); cp.setJwt("foo"); } diff --git a/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java b/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java index 1a0407a4c..f53dc7d06 100644 --- a/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java +++ b/test-non-functional/src/test/java/mp/ArangoConfigPropertiesMPImpl.java @@ -21,8 +21,14 @@ public final class ArangoConfigPropertiesMPImpl 
implements ArangoConfigPropertie private Optional jwt; private Optional timeout; private Optional useSsl; + private Optional sslCertValue; + private Optional sslAlgorithm; + private Optional sslProtocol; private Optional verifyHost; private Optional chunkSize; + private Optional pipelining; + private Optional connectionWindowSize; + private Optional initialWindowSize; private Optional maxConnections; private Optional connectionTtl; private Optional keepAliveInterval; @@ -70,6 +76,21 @@ public Optional getUseSsl() { return useSsl; } + @Override + public Optional getSslCertValue() { + return sslCertValue; + } + + @Override + public Optional getSslAlgorithm() { + return sslAlgorithm; + } + + @Override + public Optional getSslProtocol() { + return sslProtocol; + } + @Override public Optional getVerifyHost() { return verifyHost; @@ -80,6 +101,21 @@ public Optional getChunkSize() { return chunkSize; } + @Override + public Optional getPipelining() { + return pipelining; + } + + @Override + public Optional getConnectionWindowSize() { + return connectionWindowSize; + } + + @Override + public Optional getInitialWindowSize() { + return initialWindowSize; + } + @Override public Optional getMaxConnections() { return maxConnections; @@ -140,12 +176,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ArangoConfigPropertiesMPImpl that = (ArangoConfigPropertiesMPImpl) o; - return Objects.equals(hosts, that.hosts) && Objects.equals(protocol, that.protocol) && Objects.equals(user, that.user) && Objects.equals(password, that.password) && Objects.equals(jwt, that.jwt) && Objects.equals(timeout, that.timeout) && Objects.equals(useSsl, that.useSsl) && Objects.equals(verifyHost, that.verifyHost) && Objects.equals(chunkSize, that.chunkSize) && Objects.equals(maxConnections, that.maxConnections) && Objects.equals(connectionTtl, that.connectionTtl) && Objects.equals(keepAliveInterval, that.keepAliveInterval) && 
Objects.equals(acquireHostList, that.acquireHostList) && Objects.equals(acquireHostListInterval, that.acquireHostListInterval) && Objects.equals(loadBalancingStrategy, that.loadBalancingStrategy) && Objects.equals(responseQueueTimeSamples, that.responseQueueTimeSamples) && Objects.equals(compression, that.compression) && Objects.equals(compressionThreshold, that.compressionThreshold) && Objects.equals(compressionLevel, that.compressionLevel) && Objects.equals(serdeProviderClass, that.serdeProviderClass); + return Objects.equals(hosts, that.hosts) && Objects.equals(protocol, that.protocol) && Objects.equals(user, that.user) && Objects.equals(password, that.password) && Objects.equals(jwt, that.jwt) && Objects.equals(timeout, that.timeout) && Objects.equals(useSsl, that.useSsl) && Objects.equals(verifyHost, that.verifyHost) && Objects.equals(chunkSize, that.chunkSize) && Objects.equals(pipelining, that.pipelining) && Objects.equals(connectionWindowSize, that.connectionWindowSize) && Objects.equals(initialWindowSize, that.initialWindowSize) && Objects.equals(maxConnections, that.maxConnections) && Objects.equals(connectionTtl, that.connectionTtl) && Objects.equals(keepAliveInterval, that.keepAliveInterval) && Objects.equals(acquireHostList, that.acquireHostList) && Objects.equals(acquireHostListInterval, that.acquireHostListInterval) && Objects.equals(loadBalancingStrategy, that.loadBalancingStrategy) && Objects.equals(responseQueueTimeSamples, that.responseQueueTimeSamples) && Objects.equals(compression, that.compression) && Objects.equals(compressionThreshold, that.compressionThreshold) && Objects.equals(compressionLevel, that.compressionLevel) && Objects.equals(serdeProviderClass, that.serdeProviderClass); } @Override public int hashCode() { - return Objects.hash(hosts, protocol, user, password, jwt, timeout, useSsl, verifyHost, chunkSize, maxConnections, connectionTtl, keepAliveInterval, acquireHostList, acquireHostListInterval, loadBalancingStrategy, 
responseQueueTimeSamples, compression, compressionThreshold, compressionLevel, serdeProviderClass); + return Objects.hash(hosts, protocol, user, password, jwt, timeout, useSsl, verifyHost, chunkSize, pipelining, connectionWindowSize, initialWindowSize, maxConnections, connectionTtl, keepAliveInterval, acquireHostList, acquireHostListInterval, loadBalancingStrategy, responseQueueTimeSamples, compression, compressionThreshold, compressionLevel, serdeProviderClass); } @Override @@ -160,6 +196,9 @@ public String toString() { ", useSsl=" + useSsl + ", verifyHost=" + verifyHost + ", chunkSize=" + chunkSize + + ", pipelining=" + pipelining + + ", connectionWindowSize=" + connectionWindowSize + + ", initialWindowSize=" + initialWindowSize + ", maxConnections=" + maxConnections + ", connectionTtl=" + connectionTtl + ", keepAliveInterval=" + keepAliveInterval + diff --git a/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java b/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java index 3ff81bd04..fd37604d5 100644 --- a/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java +++ b/test-non-functional/src/test/java/mp/ConfigMPDefaultsTest.java @@ -23,8 +23,14 @@ private void checkResult(ArangoConfigProperties config) { assertThat(config.getJwt()).isNotPresent(); assertThat(config.getTimeout()).isEmpty(); assertThat(config.getUseSsl()).isEmpty(); + assertThat(config.getSslCertValue()).isEmpty(); + assertThat(config.getSslAlgorithm()).isEmpty(); + assertThat(config.getSslProtocol()).isEmpty(); assertThat(config.getVerifyHost()).isEmpty(); assertThat(config.getChunkSize()).isEmpty(); + assertThat(config.getPipelining()).isEmpty(); + assertThat(config.getConnectionWindowSize()).isEmpty(); + assertThat(config.getInitialWindowSize()).isEmpty(); assertThat(config.getMaxConnections()).isNotPresent(); assertThat(config.getConnectionTtl()).isNotPresent(); assertThat(config.getKeepAliveInterval()).isNotPresent(); diff --git 
a/test-non-functional/src/test/java/mp/ConfigMPTest.java b/test-non-functional/src/test/java/mp/ConfigMPTest.java index 4a7aaa993..5bd03576e 100644 --- a/test-non-functional/src/test/java/mp/ConfigMPTest.java +++ b/test-non-functional/src/test/java/mp/ConfigMPTest.java @@ -21,8 +21,14 @@ class ConfigMPTest { private final String jwt = "testJwt"; private final Integer timeout = 9876; private final Boolean useSsl = true; + private final String sslCertValue = "sslCertValue"; + private final String sslAlgorithm = "sslAlgorithm"; + private final String sslProtocol = "sslProtocol"; private final Boolean verifyHost = false; private final Integer vstChunkSize = 1234; + private final Boolean pipelining = true; + private final Integer connectionWindowSize = 987; + private final Integer initialWindowSize = 876; private final Integer maxConnections = 123; private final Long connectionTtl = 12345L; private final Integer keepAliveInterval = 123456; @@ -56,8 +62,14 @@ private void checkResult(ArangoConfigProperties config) { .hasValue(jwt); assertThat(config.getTimeout()).hasValue(timeout); assertThat(config.getUseSsl()).hasValue(useSsl); + assertThat(config.getSslCertValue()).hasValue(sslCertValue); + assertThat(config.getSslAlgorithm()).hasValue(sslAlgorithm); + assertThat(config.getSslProtocol()).hasValue(sslProtocol); assertThat(config.getVerifyHost()).hasValue(verifyHost); assertThat(config.getChunkSize()).hasValue(vstChunkSize); + assertThat(config.getPipelining()).hasValue(pipelining); + assertThat(config.getConnectionWindowSize()).hasValue(connectionWindowSize); + assertThat(config.getInitialWindowSize()).hasValue(initialWindowSize); assertThat(config.getMaxConnections()) .isPresent() .hasValue(maxConnections); diff --git a/test-non-functional/src/test/java/mp/ConfigUtilsMP.java b/test-non-functional/src/test/java/mp/ConfigUtilsMP.java index dabd62cde..07277115f 100644 --- a/test-non-functional/src/test/java/mp/ConfigUtilsMP.java +++ 
b/test-non-functional/src/test/java/mp/ConfigUtilsMP.java @@ -1,7 +1,7 @@ package mp; import com.arangodb.config.ArangoConfigProperties; -import io.smallrye.config.PropertiesConfigSourceProvider; +import io.smallrye.config.PropertiesConfigSourceLoader; import io.smallrye.config.SmallRyeConfig; import io.smallrye.config.SmallRyeConfigBuilder; @@ -17,7 +17,7 @@ public static ArangoConfigProperties loadConfigMP(final String location) { public static ArangoConfigProperties loadConfigMP(final String location, final String prefix) { SmallRyeConfig cfg = new SmallRyeConfigBuilder() - .withSources(new PropertiesConfigSourceProvider(location, ConfigUtilsMP.class.getClassLoader(), false)) + .withSources(PropertiesConfigSourceLoader.inClassPath(location, 0, ConfigUtilsMP.class.getClassLoader())) .withMapping(ArangoConfigPropertiesMPImpl.class, prefix) .build(); return cfg.getConfigMapping(ArangoConfigPropertiesMPImpl.class, prefix); diff --git a/test-non-functional/src/test/resources/arangodb-config-test.properties b/test-non-functional/src/test/resources/arangodb-config-test.properties index ef25aaf11..f63749428 100644 --- a/test-non-functional/src/test/resources/arangodb-config-test.properties +++ b/test-non-functional/src/test/resources/arangodb-config-test.properties @@ -5,8 +5,14 @@ adb.password=testPassword adb.jwt=testJwt adb.timeout=9876 adb.useSsl=true +adb.sslCertValue=sslCertValue +adb.sslAlgorithm=sslAlgorithm +adb.sslProtocol=sslProtocol adb.verifyHost=false adb.chunkSize=1234 +adb.pipelining=true +adb.connectionWindowSize=987 +adb.initialWindowSize=876 adb.maxConnections=123 adb.connectionTtl=12345 adb.keepAliveInterval=123456 diff --git a/test-functional/src/test/resources/arangodb-with-prefix.properties b/test-non-functional/src/test/resources/arangodb-with-prefix.properties similarity index 100% rename from test-functional/src/test/resources/arangodb-with-prefix.properties rename to test-non-functional/src/test/resources/arangodb-with-prefix.properties diff 
--git a/test-non-functional/src/test/resources/simplelogger.properties b/test-non-functional/src/test/resources/simplelogger.properties index 7649bd6c7..495a73812 100644 --- a/test-non-functional/src/test/resources/simplelogger.properties +++ b/test-non-functional/src/test/resources/simplelogger.properties @@ -9,3 +9,6 @@ org.slf4j.simpleLogger.showShortLogName=false org.slf4j.simpleLogger.defaultLogLevel=info #org.slf4j.simpleLogger.log.com.arangodb.internal.serde.JacksonUtils=debug #org.slf4j.simpleLogger.log.com.arangodb.internal.net.Communication=debug +#org.slf4j.simpleLogger.log.io.netty.handler.logging.LoggingHandler=debug +#org.slf4j.simpleLogger.log.io.netty.handler.codec.http2.Http2FrameLogger=debug +#org.slf4j.simpleLogger.log.com.arangodb.internal.util.AsyncQueue=trace diff --git a/test-parent/pom.xml b/test-parent/pom.xml index b6e977667..fe7ff6b91 100644 --- a/test-parent/pom.xml +++ b/test-parent/pom.xml @@ -7,7 +7,7 @@ com.arangodb arangodb-java-driver-parent - 7.10.0 + 7.23.0 pom @@ -15,11 +15,11 @@ false - 2.18.0 + 2.20.0 true - true 17 17 + src/test/java @@ -59,6 +59,11 @@ assertj-core test + + org.awaitility + awaitility + test + @@ -73,29 +78,35 @@ org.junit junit-bom - 5.10.2 + 5.14.0 pom import org.eclipse yasson - 3.0.3 + 3.0.4 org.slf4j slf4j-simple - 2.0.9 + 2.0.17 org.assertj assertj-core - 3.25.3 + 3.27.6 + + + org.awaitility + awaitility + 4.3.0 + test com.tngtech.archunit archunit-junit5 - 1.2.1 + 1.4.1 @@ -120,7 +131,7 @@ org.apache.maven.plugins maven-failsafe-plugin - 3.3.1 + 3.5.4 true @@ -153,7 +164,7 @@ - src/test/java + ${testSources} false @@ -209,7 +220,7 @@ - ${project.basedir}/src/test/java + ${project.basedir}/${testSources} ** ${project.build.directory}/generated-test-sources replacer diff --git a/test-perf/README.md b/test-perf/README.md new file mode 100644 index 000000000..2e35ccd9e --- /dev/null +++ b/test-perf/README.md @@ -0,0 +1,21 @@ +# Serde performance tests + +``` +mvn clean package -am -pl test-perf +java -cp 
test-perf/target/benchmarks.jar com.arangodb.SerdeBench +``` + +## 19/12/2024 + +- `main f613d3d6` +- `benchmark/base 1e45f8c4` + +``` +Benchmark Mode Cnt Score Score main/base +SerdeBench.deserializeDocsJson avgt 10 0.155 0.149 0.961290322580645 +SerdeBench.deserializeDocsVPack avgt 10 0.209 0.126 0.602870813397129 +SerdeBench.extractBytesJson avgt 10 2.705 0.297 0.109796672828096 +SerdeBench.extractBytesVPack avgt 10 1.12 0.133 0.11875 +SerdeBench.rawJsonDeser avgt 10 6.016 6.116 1.01662234042553 +SerdeBench.rawJsonSer avgt 10 7.711 7.222 0.936584100635456 +``` diff --git a/test-perf/pom.xml b/test-perf/pom.xml new file mode 100644 index 000000000..448b289b5 --- /dev/null +++ b/test-perf/pom.xml @@ -0,0 +1,82 @@ + + + 4.0.0 + + ../test-parent + com.arangodb + test-parent + 7.23.0 + + + test-perf + + + 1.37 + benchmarks + + + + + org.slf4j + slf4j-simple + compile + + + org.openjdk.jmh + jmh-core + ${jmh.version} + compile + + + org.openjdk.jmh + jmh-generator-annprocess + ${jmh.version} + provided + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + ${uberjar.name} + + + org.openjdk.jmh.Main + + + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + + + + + diff --git a/test-perf/src/main/java/com/arangodb/SerdeBench.java b/test-perf/src/main/java/com/arangodb/SerdeBench.java new file mode 100644 index 000000000..3577e96a0 --- /dev/null +++ b/test-perf/src/main/java/com/arangodb/SerdeBench.java @@ -0,0 +1,199 @@ +package com.arangodb; + +import com.arangodb.entity.MultiDocumentEntity; +import com.arangodb.internal.ArangoCollectionImpl; +import com.arangodb.internal.ArangoDatabaseImpl; +import com.arangodb.internal.ArangoExecutor; +import com.arangodb.internal.InternalResponse; +import com.arangodb.internal.serde.InternalSerde; +import com.arangodb.internal.serde.InternalSerdeProvider; +import com.arangodb.jackson.dataformat.velocypack.VPackMapper; +import com.arangodb.util.RawBytes; +import 
com.arangodb.util.RawJson; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.JsonNode; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; +import org.openjdk.jmh.profile.GCProfiler; +import org.openjdk.jmh.results.format.ResultFormatType; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 8, time = 1) +@Measurement(iterations = 10, time = 1) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@Fork(1) +public class SerdeBench { + public static class MyCol extends ArangoCollectionImpl { + static ArangoDB jsonAdb = new ArangoDB.Builder() + .host("127.0.0.1", 8529) + .protocol(Protocol.HTTP_JSON) + .build(); + + static ArangoDB vpackAdb = new ArangoDB.Builder() + .host("127.0.0.1", 8529) + .protocol(Protocol.HTTP_VPACK) + .build(); + + private MyCol(ArangoDB adb) { + super((ArangoDatabaseImpl) adb.db(), "foo"); + } + + public static MyCol ofJson() { + return new MyCol(jsonAdb); + } + + public static MyCol ofVpack() { + return new MyCol(vpackAdb); + } + + @Override + public ArangoExecutor.ResponseDeserializer> 
getDocumentsResponseDeserializer(Class type) { + return super.getDocumentsResponseDeserializer(type); + } + } + + @State(Scope.Benchmark) + public static class Data { + public final byte[] vpack; + public final byte[] json; + public final RawBytes rawJsonBytes; + public final RawBytes rawVPackBytes; + public final RawJson rawJson; + public final MyCol jsonCol = MyCol.ofJson(); + public final MyCol vpackCol = MyCol.ofVpack(); + public final InternalResponse jsonResp = new InternalResponse(); + public final InternalResponse vpackResp = new InternalResponse(); + + public Data() { + ObjectMapper jsonMapper = new ObjectMapper(); + VPackMapper vpackMapper = new VPackMapper(); + + try { + JsonNode jn = readFile("/api-docs.json", jsonMapper); + json = jsonMapper.writeValueAsBytes(jn); + vpack = vpackMapper.writeValueAsBytes(jn); + rawJsonBytes = RawBytes.of(json); + rawVPackBytes = RawBytes.of(vpack); + rawJson = RawJson.of(jsonMapper.writeValueAsString(jsonMapper.readTree(json))); + + JsonNode docs = readFile("/multi-docs.json", jsonMapper); + jsonResp.setResponseCode(200); + jsonResp.setBody(jsonMapper.writeValueAsBytes(docs)); + vpackResp.setResponseCode(200); + vpackResp.setBody(vpackMapper.writeValueAsBytes(docs)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private JsonNode readFile(String filename, ObjectMapper mapper) throws IOException { + InputStream inputStream = SerdeBench.class.getResourceAsStream(filename); + String str = readFromInputStream(inputStream); + return mapper.readTree(str); + } + + private String readFromInputStream(InputStream inputStream) throws IOException { + StringBuilder resultStringBuilder = new StringBuilder(); + try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream))) { + String line; + while ((line = br.readLine()) != null) { + resultStringBuilder.append(line).append("\n"); + } + } + return resultStringBuilder.toString(); + } + } + + public static void main(String[] args) throws 
RunnerException, IOException { + String datetime = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date()); + Path target = Files.createDirectories(Paths.get("target", "jmh-result")); + + ArrayList jvmArgs = new ArrayList<>(); + jvmArgs.add("-Xms256m"); + jvmArgs.add("-Xmx256m"); + if (Integer.parseInt(System.getProperty("java.version").split("\\.")[0]) >= 11) { + jvmArgs.add("-XX:StartFlightRecording=filename=" + target.resolve(datetime + ".jfr") + ",settings=profile"); + } + + Options opt = new OptionsBuilder() + .include(SerdeBench.class.getSimpleName()) + .addProfiler(GCProfiler.class) + .jvmArgs(jvmArgs.toArray(new String[0])) + .resultFormat(ResultFormatType.JSON) + .result(target.resolve(datetime + ".json").toString()) + .build(); + + new Runner(opt).run(); + } + + @Benchmark + public void rawJsonDeser(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.deserialize(data.vpack, RawJson.class) + ); + } + + @Benchmark + public void rawJsonSer(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.serialize(data.rawJson) + ); + } + + @Benchmark + public void extractBytesVPack(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.VPACK).create(); + bh.consume( + serde.extract(data.vpack, "/definitions/put_api_simple_remove_by_example_opts") + ); + } + + @Benchmark + public void extractBytesJson(Data data, Blackhole bh) { + InternalSerde serde = new InternalSerdeProvider(ContentType.JSON).create(); + bh.consume( + serde.extract(data.json, "/definitions/put_api_simple_remove_by_example_opts") + ); + } + + @Benchmark + public void deserializeDocsJson(Data data, Blackhole bh) { + bh.consume( + data.jsonCol.getDocumentsResponseDeserializer(RawBytes.class).deserialize(data.jsonResp) + ); + } + + @Benchmark + public void deserializeDocsVPack(Data data, Blackhole bh) { + bh.consume( 
+ data.vpackCol.getDocumentsResponseDeserializer(RawBytes.class).deserialize(data.vpackResp) + ); + } + +} diff --git a/test-perf/src/main/resources/api-docs.json b/test-perf/src/main/resources/api-docs.json new file mode 100644 index 000000000..d23f57331 --- /dev/null +++ b/test-perf/src/main/resources/api-docs.json @@ -0,0 +1,7377 @@ +{ + "basePath": "/", + "definitions": { + "JSA_get_api_collection_figures_rc_200": { + "properties": { + "count": { + "description": "The number of documents currently present in the collection.
", + "format": "int64", + "type": "integer" + }, + "figures": { + "$ref": "#/definitions/collection_figures" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "count", + "journalSize" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSA_put_api_simple_any": { + "properties": { + "collection": { + "description": "The identifier or name of the collection to query.
Returns a JSON object with the document stored in the attribute document if the collection contains at least one document. If the collection is empty, the document attrbute contains null.
", + "type": "string" + } + }, + "required": [ + "collection" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "example": { + "description": "The example document.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "example", + "skip", + "limit" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_first": { + "properties": { + "collection": { + "description": "the name of the collection
", + "type": "string" + }, + "count": { + "description": "the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
", + "type": "string" + } + }, + "required": [ + "collection" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_first_example": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "example": { + "description": "The example document.
", + "type": "string" + } + }, + "required": [ + "collection", + "example" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_fulltext": { + "properties": { + "attribute": { + "description": "The attribute that contains the texts.
", + "type": "string" + }, + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "index": { + "description": "The identifier of the fulltext-index to use.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "type": "string" + }, + "query": { + "description": "The fulltext query. Please refer to [Fulltext queries](../SimpleQueries/FulltextQueries.html) for details.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "attribute", + "query", + "skip", + "limit", + "index" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_last": { + "properties": { + "collection": { + "description": " the name of the collection
", + "type": "string" + }, + "count": { + "description": "the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "collection", + "count" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_near": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "distance": { + "description": "If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude": { + "description": "The latitude of the coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude": { + "description": "The longitude of the coordinate.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude", + "longitude", + "distance", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_range": { + "properties": { + "attribute": { + "description": "The attribute path to check.
", + "type": "string" + }, + "closed": { + "description": "If true, use interval including left and right, otherwise exclude right, but include left.
", + "format": "", + "type": "boolean" + }, + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "left": { + "description": "The lower bound.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
", + "format": "int64", + "type": "integer" + }, + "right": { + "description": "The upper bound.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query (optional).
", + "type": "string" + } + }, + "required": [ + "collection", + "attribute", + "left", + "right", + "closed", + "skip" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_remove_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to remove from.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "options": { + "$ref": "#/definitions/put_api_simple_remove_by_example_opts" + } + }, + "required": [ + "collection", + "example" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_replace_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to replace within.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "newValue": { + "description": "The replacement document that will get inserted in place of the \"old\" documents.
", + "type": "string" + }, + "options": { + "$ref": "#/definitions/put_api_simple_replace_by_example_options" + } + }, + "required": [ + "collection", + "example", + "newValue" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_update_by_example": { + "properties": { + "collection": { + "description": "The name of the collection to update within.
", + "type": "string" + }, + "example": { + "description": "An example document that all collection documents are compared against.
", + "type": "string" + }, + "newValue": { + "additionalProperties": {}, + "description": "A document containing all the attributes to update in the found documents.
", + "type": "object" + }, + "options": { + "$ref": "#/definitions/put_api_simple_update_by_example_options" + } + }, + "required": [ + "collection", + "example", + "newValue" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_within": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "distance": { + "description": "If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude": { + "description": "The latitude of the coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude": { + "description": "The longitude of the coordinate.
", + "type": "string" + }, + "radius": { + "description": "The maximal radius (in meters).
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude", + "longitude", + "radius", + "distance", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSA_put_api_simple_within_rectangle": { + "properties": { + "collection": { + "description": "The name of the collection to query.
", + "type": "string" + }, + "geo": { + "description": "If given, the identifier of the geo-index to use. (optional)
", + "type": "string" + }, + "latitude1": { + "description": "The latitude of the first rectangle coordinate.
", + "type": "string" + }, + "latitude2": { + "description": "The latitude of the second rectangle coordinate.
", + "type": "string" + }, + "limit": { + "description": "The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
", + "type": "string" + }, + "longitude1": { + "description": "The longitude of the first rectangle coordinate.
", + "type": "string" + }, + "longitude2": { + "description": "The longitude of the second rectangle coordinate.
", + "type": "string" + }, + "skip": { + "description": "The number of documents to skip in the query. (optional)
", + "type": "string" + } + }, + "required": [ + "collection", + "latitude1", + "longitude1", + "latitude2", + "longitude2", + "skip", + "limit", + "geo" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "JSF_HTTP_API_TRAVERSAL": { + "properties": { + "direction": { + "description": "direction for traversal
  • if set, must be either \"outbound\", \"inbound\", or \"any\"
  • if not set, the expander attribute must be specified
", + "type": "string" + }, + "edgeCollection": { + "description": "name of the collection that contains the edges.
", + "type": "string" + }, + "expander": { + "description": "body (JavaScript) code of custom expander function must be set if direction attribute is not set function signature: (config, vertex, path) -> array expander must return an array of the connections for vertex each connection is an object with the attributes edge and vertex
", + "type": "string" + }, + "filter": { + "description": "default is to include all nodes: body (JavaScript code) of custom filter function function signature: (config, vertex, path) -> mixed can return four different string values:
  • \"exclude\" -> this vertex will not be visited.
  • \"prune\" -> the edges of this vertex will not be followed.
  • \"\" or undefined -> visit the vertex and follow it's edges.
  • Array -> containing any combination of the above. If there is at least one \"exclude\" or \"prune\" respectivly is contained, it's effect will occur.
", + "type": "string" + }, + "graphName": { + "description": "name of the graph that contains the edges. Either edgeCollection or graphName has to be given. In case both values are set the graphName is prefered.
", + "type": "string" + }, + "init": { + "description": "body (JavaScript) code of custom result initialization function function signature: (config, result) -> void initialize any values in result with what is required
", + "type": "string" + }, + "itemOrder": { + "description": "item iteration order can be \"forward\" or \"backward\"
", + "type": "string" + }, + "maxDepth": { + "description": "ANDed with any existing filters visits only nodes in at most the given depth
", + "type": "string" + }, + "maxIterations": { + "description": "Maximum number of iterations in each traversal. This number can be set to prevent endless loops in traversal of cyclic graphs. When a traversal performs as many iterations as the maxIterations value, the traversal will abort with an error. If maxIterations is not set, a server-defined value may be used.
", + "type": "string" + }, + "minDepth": { + "description": "ANDed with any existing filters): visits only nodes in at least the given depth
", + "type": "string" + }, + "order": { + "description": "traversal order can be \"preorder\", \"postorder\" or \"preorder-expander\"
", + "type": "string" + }, + "sort": { + "description": "body (JavaScript) code of a custom comparison function for the edges. The signature of this function is (l, r) -> integer (where l and r are edges) and must return -1 if l is smaller than, +1 if l is greater than, and 0 if l and r are equal. The reason for this is the following: The order of edges returned for a certain vertex is undefined. This is because there is no natural order of edges for a vertex with multiple connected edges. To explicitly define the order in which edges on the vertex are followed, you can specify an edge comparator function with this attribute. Note that the value here has to be a string to conform to the JSON standard, which in turn is parsed as function body on the server side. Furthermore note that this attribute is only used for the standard expanders. If you use your custom expander you have to do the sorting yourself within the expander code.
", + "type": "string" + }, + "startVertex": { + "description": "id of the startVertex, e.g. \"users/foo\".
", + "type": "string" + }, + "strategy": { + "description": "traversal strategy can be \"depthfirst\" or \"breadthfirst\"
", + "type": "string" + }, + "uniqueness": { + "description": "specifies uniqueness for vertices and edges visited if set, must be an object like this:
\"uniqueness\": {\"vertices\": \"none\"|\"global\"|\"path\", \"edges\": \"none\"|\"global\"|\"path\"}
", + "type": "string" + }, + "visitor": { + "description": "body (JavaScript) code of custom visitor function function signature: (config, result, vertex, path, connected) -> void The visitor function can do anything, but its return value is ignored. To populate a result, use the result variable by reference. Note that the connected argument is only populated when the order attribute is set to \"preorder-expander\".
", + "type": "string" + } + }, + "required": [ + "startVertex" + ], + "type": "object", + "x-filename": "Graph Traversal - js/actions/api-traversal.js" + }, + "JSF_cluster_dispatcher_POST": { + "properties": { + "action": { + "description": "can be one of the following: - \"launch\": the cluster is launched for the first time, all data directories and log files are cleaned and created - \"shutdown\": the cluster is shut down, the additional property runInfo (see below) must be bound as well - \"relaunch\": the cluster is launched again, all data directories and log files are untouched and need to be there already - \"cleanup\": use this after a shutdown to remove all data in the data directories and all log files, use with caution - \"isHealthy\": checks whether the processes involved in the cluster are running or not. The additional property runInfo (see above) must be bound as well - \"upgrade\": performs an upgrade of a cluster, to this end, the agency is started, and then every server is once started with the \"--upgrade\" option, and then normally. Finally, the script \"version-check.js\" is run on one of the coordinators for the cluster.
", + "type": "string" + }, + "clusterPlan": { + "additionalProperties": {}, + "description": "is a cluster plan (see JSF_cluster_planner_POST),
", + "type": "object" + }, + "myname": { + "description": "is the ID of this dispatcher, this is used to decide which commands are executed locally and which are forwarded to other dispatchers
", + "type": "string" + }, + "runInfo": { + "additionalProperties": {}, + "description": "this is needed for the \"shutdown\" and \"isHealthy\" actions only and should be the structure that \"launch\", \"relaunch\" or \"upgrade\" returned. It contains runtime information like process IDs.
", + "type": "object" + } + }, + "required": [ + "clusterPlan", + "myname", + "action" + ], + "type": "object", + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "JSF_general_graph_create_http_examples": { + "properties": { + "edgeDefinitions": { + "description": "An array of definitions for the edge
", + "type": "string" + }, + "name": { + "description": "Name of the graph.
", + "type": "string" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.
", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_general_graph_edge_definition_add_http_examples": { + "properties": { + "collection": { + "description": "The name of the edge collection to be used.
", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "One or many vertex collections that can contain target vertices.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_general_graph_edge_definition_modify_http_examples": { + "properties": { + "collection": { + "description": "The name of the edge collection to be used.
", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "One or many vertex collections that can contain target vertices.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object", + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "JSF_get_api_database_new": { + "properties": { + "active": { + "description": "A Flag indicating whether the user account should be activated or not. The default value is true.
", + "format": "", + "type": "boolean" + }, + "extra": { + "additionalProperties": {}, + "description": "A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
", + "type": "object" + }, + "name": { + "description": "Has to contain a valid database name.
", + "type": "string" + }, + "passwd": { + "description": "The user password as a string. If not specified, it will default to an empty string.
", + "type": "string" + }, + "username": { + "description": "The user name as a string. If users is not specified or does not contain any users, a default user root will be created with an empty string password. This ensures that the new database will be accessible after it is created.
", + "type": "string" + }, + "users": { + "description": "Has to be a list of user objects to initially create for the new database. Each user object can contain the following attributes:
", + "items": { + "$ref": "#/definitions/JSF_get_api_database_new_USERS" + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Database - js/actions/api-database.js" + }, + "JSF_get_api_database_new_USERS": { + "description": "", + "properties": { + "active": { + "description": "if False the user won't be able to log into the database.
", + "type": "boolean" + }, + "passwd": { + "description": "Password for the user
", + "type": "string" + }, + "username": { + "description": "Loginname of the user to be created
", + "type": "string" + } + }, + "type": "object" + }, + "JSF_get_api_return_rc_200": { + "properties": { + "details": { + "additionalProperties": {}, + "description": "an optional JSON object with additional details. This is returned only if the details URL parameter is set to true in the request.
", + "type": "object" + }, + "server": { + "description": "will always contain arango
", + "type": "string" + }, + "version": { + "description": "the server version string. The string has the format \"major.*minor.*sub\". major and minor will be numeric, and sub may contain a number or a textual version.
", + "type": "string" + } + }, + "required": [ + "server", + "version" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_post_api_aqlfunction": { + "properties": { + "code": { + "description": "a string representation of the function body.
", + "type": "string" + }, + "isDeterministic": { + "description": "an optional boolean value to indicate that the function results are fully deterministic (function return value solely depends on the input value and return value is the same for repeated calls with same input). The isDeterministic attribute is currently not used but may be used later for optimisations.
", + "format": "", + "type": "boolean" + }, + "name": { + "description": "the fully qualified name of the user functions.
", + "type": "string" + } + }, + "required": [ + "name", + "code" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "JSF_post_api_collection": { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSF_post_api_collection_opts": { + "description": "additional options for key generation. If specified, then keyOptions should be a JSON array containing the following attributes:
", + "properties": { + "allowUserKeys": { + "description": "if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator will solely be responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
", + "type": "boolean" + }, + "increment": { + "description": "increment value for autoincrement key generator. Not used for other key generator types.
", + "format": "int64", + "type": "integer" + }, + "offset": { + "description": "Initial offset value for autoincrement key generator. Not used for other key generator types.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "specifies the type of the key generator. The currently available generators are traditional and autoincrement.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "JSF_post_api_cursor": { + "properties": { + "batchSize": { + "description": "maximum number of result documents to be transferred from the server to the client in one roundtrip. If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
", + "format": "int64", + "type": "integer" + }, + "bindVars": { + "description": "list of bind parameter objects.
", + "items": { + "additionalProperties": {}, + "type": "object" + }, + "type": "array" + }, + "cache": { + "description": "flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup will be skipped for the query. If set to true, it will lead to the query cache being checked for the query if the query cache mode is either on or demand.
", + "format": "", + "type": "boolean" + }, + "count": { + "description": "indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result. Calculating the \"count\" attribute might in the future have a performance impact for some queries so this option is turned off by default, and \"count\" is only returned when requested.
", + "format": "", + "type": "boolean" + }, + "options": { + "$ref": "#/definitions/JSF_post_api_cursor_opts" + }, + "query": { + "description": "contains the query string to be executed
", + "type": "string" + }, + "ttl": { + "description": "The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_opts": { + "description": "key/value object with extra options for the query.
", + "properties": { + "fullCount": { + "description": "if set to true and the query contains a LIMIT clause, then the result will contain an extra attribute extra with a sub-attribute fullCount. This sub-attribute will contain the number of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and thus make queries run longer. Note that the fullCount sub-attribute will only be present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the query.
", + "type": "boolean" + }, + "maxPlans": { + "description": "limits the maximum number of plans that are created by the AQL query optimizer.
", + "format": "int64", + "type": "integer" + }, + "optimizer.rules": { + "description": "a list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules.
", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + }, + "profile": { + "description": "if set to true, then the additional query profiling information will be returned in the extra.stats return attribute if the query result is not served from the query cache.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_rc_201": { + "properties": { + "cached": { + "description": "a boolean flag indicating whether the query result was served from the query cache or not. If the query result is served from the query cache, the extra return attribute will not contain any stats sub-attribute and no profile sub-attribute.
", + "format": "", + "type": "boolean" + }, + "code": { + "description": "the HTTP status code
", + "format": "integer", + "type": "integer" + }, + "count": { + "description": "the total number of result documents available (only available if the query was executed with the count attribute set)
", + "format": "int64", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (false in this case)
", + "format": "", + "type": "boolean" + }, + "extra": { + "additionalProperties": {}, + "description": "an optional JSON object with extra information about the query result contained in its stats sub-attribute. For data-modification queries, the extra.stats sub-attribute will contain the number of modified documents and the number of documents that could not be modified due to an error (if ignoreErrors query option is specified)
", + "type": "object" + }, + "hasMore": { + "description": "A boolean indicator whether there are more results available for the cursor on the server
", + "format": "", + "type": "boolean" + }, + "id": { + "description": "id of temporary cursor created on the server (optional, see above)
", + "type": "string" + }, + "result": { + "description": "an array of result documents (might be empty if query has no results)
", + "items": {}, + "type": "array" + } + }, + "required": [ + "error", + "code", + "hasMore", + "id", + "cached" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_cursor_rc_400": { + "properties": { + "code": { + "description": "the HTTP status code
", + "format": "int64", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate that an error occurred (true in this case)
", + "format": "", + "type": "boolean" + }, + "errorMessage": { + "description": "a descriptive error message
If the query specification is complete, the server will process the query. If an error occurs during query processing, the server will respond with HTTP 400. Again, the body of the response will contain details about the error.
A list of query errors can be found [here](../ArangoErrors/README.md).

", + "type": "string" + }, + "errorNum": { + "description": "the server error number
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object", + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "JSF_post_api_explain": { + "properties": { + "bindVars": { + "description": "key/value pairs representing the bind values
", + "items": { + "additionalProperties": {}, + "type": "object" + }, + "type": "array" + }, + "options": { + "$ref": "#/definitions/explain_options" + }, + "query": { + "description": "the query which you want explained; If the query references any bind variables, these must also be passed in the attribute bindVars. Additional options for the query can be passed in the options attribute.
", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "JSF_post_api_export": { + "properties": { + "batchSize": { + "description": "maximum number of result documents to be transferred from the server to the client in one roundtrip (optional). If this attribute is not set, a server-controlled default value will be used.
", + "format": "int64", + "type": "integer" + }, + "count": { + "description": "boolean flag that indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result (optional). Calculating the \"count\" attribute might in the future have a performance impact so this option is turned off by default, and \"count\" is only returned when requested.
", + "format": "", + "type": "boolean" + }, + "flush": { + "description": "if set to true, a WAL flush operation will be executed prior to the export. The flush operation will start copying documents from the WAL to the collection's datafiles. There will be an additional wait time of up to flushWait seconds after the flush to allow the WAL collector to change the adjusted document meta-data to point into the datafiles, too. The default value is false (i.e. no flush) so most recently inserted or updated documents from the collection might be missing in the export.
", + "format": "", + "type": "boolean" + }, + "flushWait": { + "description": "maximum wait time in seconds after a flush operation. The default value is 10. This option only has an effect when flush is set to true.
", + "format": "int64", + "type": "integer" + }, + "limit": { + "description": "an optional limit value, determining the maximum number of documents to be included in the cursor. Omitting the limit attribute or setting it to 0 will lead to no limit being used. If a limit is used, it is undefined which documents from the collection will be included in the export and which will be excluded. This is because there is no natural order of documents in a collection.
", + "format": "int64", + "type": "integer" + }, + "restrict": { + "$ref": "#/definitions/JSF_post_api_export_restrictions" + }, + "ttl": { + "description": "an optional time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "flush", + "flushWait", + "count", + "batchSize", + "limit", + "ttl" + ], + "type": "object", + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + }, + "JSF_post_api_export_restrictions": { + "description": "an object containing an array of attribute names that will be included or excluded when returning result documents.
Not specifying restrict will by default return all attributes of each document.
", + "properties": { + "fields": { + "description": "Contains an array of attribute names to include or exclude. Matching of attribute names for inclusion or exclusion will be done on the top level only. Specifying names of nested attributes is not supported at the moment.

", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "has to be set to either include or exclude depending on which you want to use
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + }, + "JSF_post_api_index_cap": { + "properties": { + "byteSize": { + "description": "The maximal size of the active document data in the collection (in bytes). If specified, the value must be at least 16384.

", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The maximal number of documents for the collection. If specified, the value must be greater than zero.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "must be equal to \"cap\".
", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_fulltext": { + "properties": { + "fields": { + "description": "an array of attribute names. Currently, the array is limited to exactly one attribute.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "minLength": { + "description": "Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index.
", + "format": "int64", + "type": "integer" + }, + "type": { + "description": "must be equal to \"fulltext\".
", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "minLength" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_geo": { + "properties": { + "fields": { + "description": "An array with one or two attribute paths.
If it is an array with one attribute path location, then a geo-spatial index on all documents is created using location as path to the coordinates. The value of the attribute must be an array with at least two double values. The array must contain the latitude (first value) and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored.
If it is an array with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the attribute longitude must be a double. All documents which do not have the attribute paths, or whose values are not suitable, are ignored.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "geoJson": { + "description": "If a geo-spatial index on a location is constructed and geoJson is true, then the order within the array is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
", + "type": "string" + }, + "type": { + "description": "must be equal to \"geo\".
", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "geoJson" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_hash": { + "properties": { + "fields": { + "description": "an array of attribute paths.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "sparse": { + "description": "if true, then create a sparse index.
", + "format": "", + "type": "boolean" + }, + "type": { + "description": "must be equal to \"hash\".
", + "type": "string" + }, + "unique": { + "description": "if true, then create a unique index.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "type", + "fields", + "unique", + "sparse" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_index_skiplist": { + "properties": { + "fields": { + "description": "an array of attribute paths.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "sparse": { + "description": "if true, then create a sparse index.
", + "format": "", + "type": "boolean" + }, + "type": { + "description": "must be equal to \"skiplist\".
", + "type": "string" + }, + "unique": { + "description": "if true, then create a unique index.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "type", + "fields", + "unique", + "sparse" + ], + "type": "object", + "x-filename": "Indexes - js/actions/api-index.js" + }, + "JSF_post_api_new_tasks": { + "properties": { + "command": { + "description": "The JavaScript code to be executed
", + "type": "string" + }, + "name": { + "description": "The name of the task
", + "type": "string" + }, + "offset": { + "description": "Number of seconds initial delay
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "The parameters to be passed into command
", + "type": "string" + }, + "period": { + "description": "number of seconds between the executions
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "name", + "command", + "params" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_post_api_transaction": { + "properties": { + "action": { + "description": "the actual transaction operations to be executed, in the form of stringified JavaScript code. The code will be executed on server side, with late binding. It is thus critical that the code specified in action properly sets up all the variables it needs. If the code specified in action ends with a return statement, the value returned will also be returned by the REST API in the result attribute if the transaction committed successfully.
", + "type": "string" + }, + "collections": { + "description": "contains the array of collections to be used in the transaction (mandatory). collections must be a JSON object that can have the optional sub-attributes read and write. read and write must each be either arrays of collections names or strings with a single collection name.
", + "type": "string" + }, + "lockTimeout": { + "description": "an optional numeric value that can be used to set a timeout for waiting on collection locks. If not specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "optional arguments passed to action.
", + "type": "string" + }, + "waitForSync": { + "description": "an optional boolean flag that, if set, will force the transaction to write all data to disk before returning.
", + "format": "boolean", + "type": "boolean" + } + }, + "required": [ + "collections", + "action" + ], + "type": "object", + "x-filename": "Transactions - js/actions/api-transaction.js" + }, + "JSF_post_batch_replication": { + "properties": { + "ttl": { + "description": "the time-to-live for the new batch (in seconds)
A JSON object with the batch configuration.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "ttl" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_new_tasks": { + "properties": { + "command": { + "description": "The JavaScript code to be executed
", + "type": "string" + }, + "name": { + "description": "The name of the task
", + "type": "string" + }, + "offset": { + "description": "Number of seconds initial delay
", + "format": "int64", + "type": "integer" + }, + "params": { + "description": "The parameters to be passed into command
", + "type": "string" + }, + "period": { + "description": "number of seconds between the executions
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "name", + "command", + "params" + ], + "type": "object", + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "JSF_put_api_replication_applier_adjust": { + "properties": { + "adaptivePolling": { + "description": "if set to true, the replication applier will fall to sleep for an increasingly long period in case the logger server at the endpoint does not have any more replication events to apply. Using adaptive polling is thus useful to reduce the amount of work for both the applier and the logger server for cases when there are only infrequent changes. The downside is that when using adaptive polling, it might take longer for the replication applier to detect that there are new replication events on the logger server.
Setting adaptivePolling to false will make the replication applier contact the logger server in a constant interval, regardless of whether the logger server provides updates frequently or seldom.
", + "format": "", + "type": "boolean" + }, + "autoStart": { + "description": "whether or not to auto-start the replication applier on (next and following) server starts
", + "format": "", + "type": "boolean" + }, + "chunkSize": { + "description": "the requested maximum size for log transfer packets that is used when the endpoint is contacted.
", + "format": "int64", + "type": "integer" + }, + "connectTimeout": { + "description": "the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
", + "format": "int64", + "type": "integer" + }, + "database": { + "description": "the name of the database on the endpoint. If not specified, defaults to the current local database name.
", + "type": "string" + }, + "endpoint": { + "description": "the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "maxConnectRetries": { + "description": "the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
", + "format": "int64", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the endpoint.
", + "type": "string" + }, + "requestTimeout": { + "description": "the timeout (in seconds) for individual requests to the endpoint.
", + "format": "int64", + "type": "integer" + }, + "requireFromPresent": { + "description": "if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
", + "format": "", + "type": "boolean" + }, + "restrictCollections": { + "description": "the array of collections to include or exclude, based on the setting of restrictType
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "the configuration for restrictCollections; Has to be either include or exclude
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.
", + "type": "string" + }, + "verbose": { + "description": "if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "endpoint", + "database", + "password", + "maxConnectRetries", + "connectTimeout", + "requestTimeout", + "chunkSize", + "autoStart", + "adaptivePolling", + "includeSystem", + "requireFromPresent", + "verbose", + "restrictType" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_replication_makeSlave": { + "properties": { + "adaptivePolling": { + "description": "whether or not the replication applier will use adaptive polling.
", + "format": "", + "type": "boolean" + }, + "chunkSize": { + "description": "the requested maximum size for log transfer packets that is used when the endpoint is contacted.
", + "format": "int64", + "type": "integer" + }, + "connectTimeout": { + "description": "the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
", + "format": "int64", + "type": "integer" + }, + "database": { + "description": "the database name on the master (if not specified, defaults to the name of the local current database).
", + "type": "string" + }, + "endpoint": { + "description": "the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "maxConnectRetries": { + "description": "the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
", + "format": "int64", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the master.
", + "type": "string" + }, + "requestTimeout": { + "description": "the timeout (in seconds) for individual requests to the endpoint.
", + "format": "int64", + "type": "integer" + }, + "requireFromPresent": { + "description": "if set to true, then the replication applier will check at start of its continuous replication if the start tick from the dump phase is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
", + "format": "", + "type": "boolean" + }, + "restrictCollections": { + "description": "an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be sychronised. If restrictType is exclude, all but the specified collections will be synchronized.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "an optional string value for collection filtering. When specified, the allowed values are include or exclude.
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the master.
", + "type": "string" + }, + "verbose": { + "description": "if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "endpoint", + "database", + "password", + "includeSystem", + "maxConnectRetries", + "connectTimeout", + "requestTimeout", + "chunkSize", + "adaptivePolling", + "requireFromPresent" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_api_replication_synchronize": { + "properties": { + "database": { + "description": "the database name on the master (if not specified, defaults to the name of the local current database).
", + "type": "string" + }, + "endpoint": { + "description": "the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied
", + "format": "", + "type": "boolean" + }, + "incremental": { + "description": "if set to true, then an incremental synchronization method will be used for synchronizing data in collections. This method is useful when collections already exist locally, and only the remaining differences need to be transferred from the remote endpoint. In this case, the incremental synchronization can be faster than a full synchronization. The default value is false, meaning that the complete data from the remote collection will be transferred.
", + "format": "", + "type": "boolean" + }, + "password": { + "description": "the password to use when connecting to the endpoint.
", + "type": "string" + }, + "restrictCollections": { + "description": "an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be sychronised. If restrictType is exclude, all but the specified collections will be synchronized.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "an optional string value for collection filtering. When specified, the allowed values are include or exclude.
", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.
", + "type": "string" + } + }, + "required": [ + "endpoint", + "password" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "JSF_put_batch_replication": { + "properties": { + "ttl": { + "description": "the time-to-live for the new batch (in seconds)
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "ttl" + ], + "type": "object", + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "PostApiQueryProperties": { + "properties": { + "query": { + "description": "To validate a query string without executing it, the query string can be passed to the server via an HTTP POST request.
", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "PutApiQueryCacheProperties": { + "properties": { + "maxResults": { + "description": "the maximum number of query results that will be stored per database-specific cache.

", + "format": "int64", + "type": "integer" + }, + "mode": { + "description": " the mode the AQL query cache should operate in. Possible values are off, on or demand.
", + "type": "string" + } + }, + "required": [ + "mode", + "maxResults" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "PutApiQueryProperties": { + "properties": { + "enabled": { + "description": "If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
", + "format": "", + "type": "boolean" + }, + "maxQueryStringLength": { + "description": "The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
", + "format": "int64", + "type": "integer" + }, + "maxSlowQueries": { + "description": "The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
", + "format": "int64", + "type": "integer" + }, + "slowQueryThreshold": { + "description": "The threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
", + "format": "int64", + "type": "integer" + }, + "trackSlowQueries": { + "description": "If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
", + "format": "", + "type": "boolean" + } + }, + "required": [ + "enabled", + "trackSlowQueries", + "maxSlowQueries", + "slowQueryThreshold", + "maxQueryStringLength" + ], + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "RestLookupByKeys": { + "properties": { + "collection": { + "description": "The name of the collection to look in for the documents
", + "type": "string" + }, + "keys": { + "description": "array with the _keys of documents to remove.
", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "keys" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "RestRemoveByKeys": { + "properties": { + "collection": { + "description": "The name of the collection to look in for the documents to remove
", + "type": "string" + }, + "keys": { + "description": "array with the _keys of documents to remove.
", + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "$ref": "#/definitions/put_api_simple_remove_by_keys_opts" + } + }, + "required": [ + "collection", + "keys" + ], + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "collection_figures": { + "description": "metrics of the collection
", + "properties": { + "alive": { + "$ref": "#/definitions/collection_figures_alive" + }, + "attributes": { + "$ref": "#/definitions/collection_figures_attributes" + }, + "compactors": { + "$ref": "#/definitions/collection_figures_compactors" + }, + "datafiles": { + "$ref": "#/definitions/collection_figures_datafiles" + }, + "dead": { + "$ref": "#/definitions/collection_figures_dead" + }, + "indexes": { + "$ref": "#/definitions/collection_figures_indexes" + }, + "journals": { + "$ref": "#/definitions/collection_figures_journals" + }, + "maxTick": { + "description": "The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.
", + "format": "int64", + "type": "integer" + }, + "shapefiles": { + "$ref": "#/definitions/collection_figures_shapefiles" + }, + "shapes": { + "$ref": "#/definitions/collection_figures_shapes" + }, + "uncollectedLogfileEntries": { + "description": "The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "figures", + "alive", + "dead", + "datafiles", + "journals", + "compactors", + "shapefiles", + "shapes", + "attributes", + "indexes" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_alive": { + "description": "the currently active figures
", + "properties": { + "count": { + "description": "The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_attributes": { + "description": "", + "properties": { + "count": { + "description": "The total number of attributes used in the collection. Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size of the attribute data (in bytes). Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_compactors": { + "description": "
", + "properties": { + "count": { + "description": "The number of compactor files.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of all compactor files (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_datafiles": { + "description": "Metrics regarding the datafiles
", + "properties": { + "count": { + "description": "The number of datafiles.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of datafiles (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_dead": { + "description": "the items waiting to be swept away by the cleaner
", + "properties": { + "count": { + "description": "The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "deletion": { + "description": "The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reporting in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size in bytes used by all dead documents.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_indexes": { + "description": "", + "properties": { + "count": { + "description": "The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total memory allocated for indexes in bytes.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_journals": { + "description": "Metrics regarding the journal files
", + "properties": { + "count": { + "description": "The number of journal files.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of all journal files (in bytes).
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_shapefiles": { + "description": "deprecated
", + "properties": { + "count": { + "description": "The number of shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 since ArangoDB 2.0 and higher.
", + "format": "int64", + "type": "integer" + }, + "fileSize": { + "description": "The total filesize of the shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 in ArangoDB 2.0 and higher.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "collection_figures_shapes": { + "description": "", + "properties": { + "count": { + "description": "The total number of shapes used in the collection. This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + }, + "size": { + "description": "The total size of all shapes (in bytes). This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
", + "format": "int64", + "type": "integer" + } + }, + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "explain_options": { + "description": "Options for the query
", + "properties": { + "allPlans": { + "description": "if set to true, all possible execution plans will be returned. The default is false, meaning only the optimal plan will be returned.
", + "type": "boolean" + }, + "maxNumberOfPlans": { + "description": "an optional maximum number of plans that the optimizer is allowed to generate. Setting this attribute to a low value allows to put a cap on the amount of work the optimizer does.
", + "format": "int64", + "type": "integer" + }, + "optimizer.rules": { + "description": "an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules.
", + "format": "string", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put_api_simple_remove_by_example_opts": { + "description": "a json object which can contains following attributes:
", + "properties": { + "limit": { + "description": "an optional value that determines how many documents to delete at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be deleted.
", + "type": "string" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_remove_by_keys_opts": { + "description": "a json object which can contains following attributes:
", + "properties": { + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "string" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_replace_by_example_options": { + "description": "a json object which can contain following attributes
", + "properties": { + "limit": { + "description": "an optional value that determines how many documents to replace at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be replaced.

", + "type": "string" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + }, + "put_api_simple_update_by_example_options": { + "description": "a json object which can contains following attributes:
", + "properties": { + "keepNull": { + "description": "This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document.
", + "type": "string" + }, + "limit": { + "description": "an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated.
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
", + "type": "boolean" + } + }, + "type": "object", + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "info": { + "description": "ArangoDB REST API Interface", + "license": { + "name": "Apache License, Version 2.0" + }, + "title": "ArangoDB", + "version": "1.0" + }, + "paths": { + "/_admin/cluster-test": { + "delete": { + "description": "\n\nSee GET method.
", + "parameters": [], + "summary": " Delete cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "get": { + "description": "\n\n
Executes a cluster roundtrip from a coordinator to a DB server and back. This call only works in a coordinator node in a cluster. One can and should append an arbitrary path to the URL and the part after /_admin/cluster-test is used as the path of the HTTP request which is sent from the coordinator to a DB node. Likewise, any form data appended to the URL is forwarded in the request to the DB node. This handler takes care of all request types (see below) and uses the same request type in its request to the DB node.
The following HTTP headers are interpreted in a special way:
- X-Shard-ID: This specifies the ID of the shard to which the cluster request is sent and thus tells the system to which DB server to send the cluster request. Note that the mapping from the shard ID to the responsible server has to be defined in the agency under Current/ShardLocation/. One has to give this header, otherwise the system does not know where to send the request. - X-Client-Transaction-ID: the value of this header is taken as the client transaction ID for the request - X-Timeout: specifies a timeout in seconds for the cluster operation. If the answer does not arrive within the specified timeout, a corresponding error is returned and any subsequent real answer is ignored. The default if not given is 24 hours. - X-Synchronous-Mode: If set to true the test function uses synchronous mode, otherwise the default asynchronous operation mode is used. This is mainly for debugging purposes. - Host: This header is ignored and not forwarded to the DB server. - User-Agent: This header is ignored and not forwarded to the DB server.
All other HTTP headers and the body of the request (if present, see other HTTP methods below) are forwarded as given in the original request.
In asynchronous mode the DB server answers with an HTTP request of its own, in synchronous mode it sends a HTTP response. In both cases the headers and the body are used to produce the HTTP response of this API call.
", + "parameters": [], + "responses": { + "200": { + "description": "is returned when everything went well, or if a timeout occurred. In the latter case a body of type application/json indicating the timeout is returned.
" + }, + "403": { + "description": "is returned if ArangoDB is not running in cluster mode.
" + }, + "404": { + "description": "is returned if ArangoDB was not compiled for cluster operation.
" + } + }, + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "head": { + "description": "\n\nSee GET method.
", + "parameters": [], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "patch": { + "description": "free style json body\n\nSee GET method. The body can be any type and is simply forwarded.
", + "parameters": [ + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Update cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "post": { + "description": "free style json body\n\nSee GET method.
", + "parameters": [ + { + "description": "The body can be any type and is simply forwarded.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + }, + "put": { + "description": "free style json body\n\nSee GET method. The body can be any type and is simply forwarded.
", + "parameters": [ + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute cluster roundtrip", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterCheckPort": { + "get": { + "description": "\n\n
", + "parameters": [ + { + "description": "
", + "in": "query", + "name": "port", + "required": true, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "
" + } + }, + "summary": " Check port", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterDispatch": { + "post": { + "description": "**A json post document with these Properties is required:**
  • clusterPlan: is a cluster plan (see JSF_cluster_planner_POST),
  • action: can be one of the following: - \"launch\": the cluster is launched for the first time, all data directories and log files are cleaned and created - \"shutdown\": the cluster is shut down, the additional property runInfo (see below) must be bound as well - \"relaunch\": the cluster is launched again, all data directories and log files are untouched and need to be there already - \"cleanup\": use this after a shutdown to remove all data in the data directories and all log files, use with caution - \"isHealthy\": checks whether or not the processes involved in the cluster are running or not. The additional property runInfo (see above) must be bound as well - \"upgrade\": performs an upgrade of a cluster, to this end, the agency is started, and then every server is once started with the \"--upgrade\" option, and then normally. Finally, the script \"version-check.js\" is run on one of the coordinators for the cluster.
  • runInfo: this is needed for the \"shutdown\" and \"isHealthy\" actions only and should be the structure that \"launch\", \"relaunch\" or \"upgrade\" returned. It contains runtime information like process IDs.
  • myname: is the ID of this dispatcher, this is used to decide which commands are executed locally and which are forwarded to other dispatchers
\n\nThe body must be an object with the following properties:
This call executes the plan by either doing the work personally or by delegating to other dispatchers.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_cluster_dispatcher_POST" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "went wrong with the startup.
" + } + }, + "summary": "execute startup commands", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterPlanner": { + "post": { + "description": "free style json body\n\nof a cluster and returns a JSON description of a plan to start up this cluster.
", + "parameters": [ + { + "description": "A cluster plan object
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "
" + } + }, + "summary": " Produce cluster startup plan", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/clusterStatistics": { + "get": { + "description": "\n\n
", + "parameters": [ + { + "description": "
", + "in": "query", + "name": "DBserver", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "
" + }, + "400": { + "description": "ID of a DBserver
" + }, + "403": { + "description": "
" + } + }, + "summary": " Queries statistics of DBserver", + "tags": [ + "Cluster" + ], + "x-examples": [], + "x-filename": "Cluster - js/actions/api-cluster.js" + } + }, + "/_admin/database/target-version": { + "get": { + "description": "\n\n
Returns the database-version that this server requires. The version is returned in the version attribute of the result.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned in all cases.
" + } + }, + "summary": " Return the required version of the database", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/echo": { + "get": { + "description": "\n\n
The call returns an object with the following attributes:
  • headers: object with HTTP headers received
  • requestType: the HTTP request method (e.g. GET)
  • parameters: object with URL parameters received
", + "parameters": [], + "responses": { + "200": { + "description": "Echo was returned successfully.
" + } + }, + "summary": " Return current request", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/execute": { + "post": { + "description": "free style json body\n\n
Executes the javascript code in the body on the server as the body of a function with no arguments. If you have a return statement then the return value you produce will be returned as content type application/json. If the parameter returnAsJSON is set to true, the result will be a JSON object describing the return value directly, otherwise a string produced by JSON.stringify will be returned.
", + "parameters": [ + { + "description": "The body to be executed.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Execute program", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/log": { + "get": { + "description": "\n\nReturns fatal, error, warning or info log messages from the server's global log. The result is a JSON object with the following attributes:
  • lid: a list of log entry identifiers. Each log message is uniquely identified by its lid and the identifiers are in ascending order.
  • level: a list of the log-levels for all log entries.
  • timestamp: a list of the timestamps as seconds since 1970-01-01 for all log entries.
  • text a list of the texts of all log entries
  • totalAmount: the total amount of log entries before pagination.
", + "parameters": [ + { + "description": "Returns all log entries up to log level upto. Note that upto must be:
  • fatal or 0
  • error or 1
  • warning or 2
  • info or 3
  • debug or 4. The default value is info.
", + "in": "query", + "name": "upto", + "required": false, + "type": "string" + }, + { + "description": "Returns all log entries of log level level. Note that the URL parameters upto and level are mutually exclusive.
", + "in": "query", + "name": "level", + "required": false, + "type": "string" + }, + { + "description": "Returns all log entries such that their log entry identifier (lid value) is greater or equal to start.
", + "in": "query", + "name": "start", + "required": false, + "type": "number" + }, + { + "description": "Restricts the result to at most size log entries.
", + "in": "query", + "name": "size", + "required": false, + "type": "number" + }, + { + "description": "Starts to return log entries skipping the first offset log entries. offset and size can be used for pagination.
", + "in": "query", + "name": "offset", + "required": false, + "type": "number" + }, + { + "description": "Only return the log entries containing the text specified in search.
", + "in": "query", + "name": "search", + "required": false, + "type": "string" + }, + { + "description": "Sort the log entries either ascending (if sort is asc) or descending (if sort is desc) according to their lid values. Note that the lid imposes a chronological order. The default value is asc.
", + "in": "query", + "name": "sort", + "required": false, + "type": "string" + } + ], + "responses": { + "400": { + "description": "is returned if invalid values are specified for upto or level.
" + }, + "403": { + "description": "is returned if the log is requested for any database other than _system.
" + }, + "500": { + "description": "is returned if the server cannot generate the result due to an out-of-memory error.
" + } + }, + "summary": " Read global log from the server", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/long_echo": { + "get": { + "description": "\n\n
The call returns an object with the following attributes:
  • headers: object with HTTP headers received
  • requestType: the HTTP request method (e.g. GET)
  • parameters: object with URL parameters received
", + "parameters": [], + "responses": { + "200": { + "description": "Echo was returned successfully.
" + } + }, + "summary": " Return current request and continues", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/routing/reload": { + "post": { + "description": "\n\n
Reloads the routing information from the collection routing.
", + "parameters": [], + "responses": { + "200": { + "description": "Routing information was reloaded successfully.
" + } + }, + "summary": " Reloads the routing information", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/server/role": { + "get": { + "description": "\n\n
Returns the role of a server in a cluster. The role is returned in the role attribute of the result. Possible return values for role are:
  • COORDINATOR: the server is a coordinator in a cluster
  • PRIMARY: the server is a primary database server in a cluster
  • SECONDARY: the server is a secondary database server in a cluster
  • UNDEFINED: in a cluster, UNDEFINED is returned if the server role cannot be determined. On a single server, UNDEFINED is the only possible return value.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned in all cases.
" + } + }, + "summary": " Return role of a server in a cluster", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/shutdown": { + "get": { + "description": "\n\nThis call initiates a clean shutdown sequence.
", + "parameters": [], + "responses": { + "200": { + "description": "is returned in all cases.
" + } + }, + "summary": " Initiate shutdown sequence", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/sleep": { + "get": { + "description": "\n\n
The call returns an object with the attribute duration. This takes as many seconds as the duration argument says.
", + "parameters": [ + { + "description": "wait `duration` seconds until the reply is sent.
", + "format": "integer", + "in": "path", + "name": "duration", + "required": true, + "type": "integer" + } + ], + "responses": { + "200": { + "description": "Sleep was conducted successfully.
" + } + }, + "summary": " Sleep for a specified amount of seconds", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/statistics": { + "get": { + "description": "\n\n
Returns the statistics information. The returned object contains the statistics figures grouped together according to the description returned by _admin/statistics-description. For instance, to access a figure userTime from the group system, you first select the sub-object describing the group stored in system and in that sub-object the value for userTime is stored in the attribute of the same name.
In case of a distribution, the returned object contains the total count in count and the distribution list in counts. The sum (or total) of the individual values is returned in sum.

Example:

shell> curl --dump - http://localhost:8529/_admin/statistics\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"time\" : 1443627584.140516, \n  \"system\" : { \n    \"minorPageFaults\" : 137584, \n    \"majorPageFaults\" : 5, \n    \"userTime\" : 36.03, \n    \"systemTime\" : 1.34, \n    \"numberOfThreads\" : 23, \n    \"residentSize\" : 192217088, \n    \"residentSizePercent\" : 0.022905696552235805, \n    \"virtualSize\" : 3688673280 \n  }, \n  \"client\" : { \n    \"httpConnections\" : 1, \n    \"connectionTime\" : { \n      \"sum\" : 0.00033211708068847656, \n      \"count\" : 1, \n      \"counts\" : [ \n        1, \n        0, \n        0, \n        0 \n      ] \n    }, \n    \"totalTime\" : { \n      \"sum\" : 26.437366724014282, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3204, \n        267, \n        60, \n        16, \n        3, \n        1, \n        4 \n      ] \n    }, \n    \"requestTime\" : { \n      \"sum\" : 14.136068344116211, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3297, \n        219, \n        26, \n        8, \n        3, \n        2, \n        0 \n      ] \n    }, \n    \"queueTime\" : { \n      \"sum\" : 0.09597921371459961, \n      \"count\" : 3526, \n      \"counts\" : [ \n        3526, \n        0, \n        0, \n        0, \n        0, \n        0, \n        0 \n      ] \n    }, \n    \"ioTime\" : { \n      \"sum\" : 12.205319166183472, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3438, \n        98, \n        12, \n        4, \n        0, \n        0, \n        3 \n      ] \n    }, \n    \"bytesSent\" : { \n      \"sum\" : 1578763, \n      \"count\" : 3555, \n      \"counts\" : [ \n        389, \n        2939, \n        15, \n        212, \n        0, \n        0 \n      ] \n    }, \n    \"bytesReceived\" : { \n      \"sum\" : 796270, \n      \"count\" : 3555, \n      \"counts\" : [ \n        3344, \n        211, \n        0, \n        0, \n        0, \n        0 
\n      ] \n    } \n  }, \n  \"http\" : { \n    \"requestsTotal\" : 3567, \n    \"requestsAsync\" : 0, \n    \"requestsGet\" : 597, \n    \"requestsHead\" : 65, \n    \"requestsPost\" : 2652, \n    \"requestsPut\" : 110, \n    \"requestsPatch\" : 3, \n    \"requestsDelete\" : 139, \n    \"requestsOptions\" : 0, \n    \"requestsOther\" : 1 \n  }, \n  \"server\" : { \n    \"uptime\" : 47.32217192649841, \n    \"physicalMemory\" : 8391671808 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Statistics were returned successfully.
" + } + }, + "summary": " Read the statistics", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/statistics-description": { + "get": { + "description": "\n\n
Returns a description of the statistics returned by /_admin/statistics. The returned objects contains an array of statistics groups in the attribute groups and an array of statistics figures in the attribute figures.
A statistics group is described by
  • group: The identifier of the group.
  • name: The name of the group.
  • description: A description of the group.
A statistics figure is described by
  • group: The identifier of the group to which this figure belongs.
  • identifier: The identifier of the figure. It is unique within the group.
  • name: The name of the figure.
  • description: A description of the figure.
  • type: Either current, accumulated, or distribution.
  • cuts: The distribution vector.
  • units: Units in which the figure is measured.

Example:

shell> curl --dump - http://localhost:8529/_admin/statistics-description\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"groups\" : [ \n    { \n      \"group\" : \"system\", \n      \"name\" : \"Process Statistics\", \n      \"description\" : \"Statistics about the ArangoDB process\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"name\" : \"Client Connection Statistics\", \n      \"description\" : \"Statistics about the connections.\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"name\" : \"HTTP Request Statistics\", \n      \"description\" : \"Statistics about the HTTP requests.\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"name\" : \"Server Statistics\", \n      \"description\" : \"Statistics about the ArangoDB server\" \n    } \n  ], \n  \"figures\" : [ \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"userTime\", \n      \"name\" : \"User Time\", \n      \"description\" : \"Amount of time that this process has been scheduled in user mode, measured in seconds.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"systemTime\", \n      \"name\" : \"System Time\", \n      \"description\" : \"Amount of time that this process has been scheduled in kernel mode, measured in seconds.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"numberOfThreads\", \n      \"name\" : \"Number of Threads\", \n      \"description\" : \"Number of threads in the arangod process.\", \n      \"type\" : \"current\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"residentSize\", \n      \"name\" : \"Resident Set Size\", \n      \"description\" : \"The total size of the number of pages the process has in real memory. 
This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out. The resident set size is reported in bytes.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"residentSizePercent\", \n      \"name\" : \"Resident Set Size\", \n      \"description\" : \"The percentage of physical memory used by the process as resident set size.\", \n      \"type\" : \"current\", \n      \"units\" : \"percent\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"virtualSize\", \n      \"name\" : \"Virtual Memory Size\", \n      \"description\" : \"On Windows, this figure contains the total amount of memory that the memory manager has committed for the arangod process. On other systems, this figure contains The size of the virtual memory the process is using.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"minorPageFaults\", \n      \"name\" : \"Minor Page Faults\", \n      \"description\" : \"The number of minor faults the process has made which have not required loading a memory page from disk. This figure is not reported on Windows.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"system\", \n      \"identifier\" : \"majorPageFaults\", \n      \"name\" : \"Major Page Faults\", \n      \"description\" : \"On Windows, this figure contains the total number of page faults. 
On other system, this figure contains the number of major faults the process has made which have required loading a memory page from disk.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"httpConnections\", \n      \"name\" : \"Client Connections\", \n      \"description\" : \"The number of connections that are currently open.\", \n      \"type\" : \"current\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"totalTime\", \n      \"name\" : \"Total Time\", \n      \"description\" : \"Total time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"requestTime\", \n      \"name\" : \"Request Time\", \n      \"description\" : \"Request time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"queueTime\", \n      \"name\" : \"Queue Time\", \n      \"description\" : \"Queue time needed to answer a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.01, \n        0.05, \n        0.1, \n        0.2, \n        0.5, \n        1 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"bytesSent\", \n      \"name\" : \"Bytes Sent\", \n      \"description\" : \"Bytes sents for a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        250, \n        1000, \n        2000, \n        5000, \n        10000 \n      ], \n      \"units\" : 
\"bytes\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"bytesReceived\", \n      \"name\" : \"Bytes Received\", \n      \"description\" : \"Bytes receiveds for a request.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        250, \n        1000, \n        2000, \n        5000, \n        10000 \n      ], \n      \"units\" : \"bytes\" \n    }, \n    { \n      \"group\" : \"client\", \n      \"identifier\" : \"connectionTime\", \n      \"name\" : \"Connection Time\", \n      \"description\" : \"Total connection time of a client.\", \n      \"type\" : \"distribution\", \n      \"cuts\" : [ \n        0.1, \n        1, \n        60 \n      ], \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsTotal\", \n      \"name\" : \"Total requests\", \n      \"description\" : \"Total number of HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsAsync\", \n      \"name\" : \"Async requests\", \n      \"description\" : \"Number of asynchronously executed HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsGet\", \n      \"name\" : \"HTTP GET requests\", \n      \"description\" : \"Number of HTTP GET requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsHead\", \n      \"name\" : \"HTTP HEAD requests\", \n      \"description\" : \"Number of HTTP HEAD requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPost\", \n      \"name\" : \"HTTP POST requests\", \n      \"description\" : \"Number of HTTP POST requests.\", \n      \"type\" : 
\"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPut\", \n      \"name\" : \"HTTP PUT requests\", \n      \"description\" : \"Number of HTTP PUT requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsPatch\", \n      \"name\" : \"HTTP PATCH requests\", \n      \"description\" : \"Number of HTTP PATCH requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsDelete\", \n      \"name\" : \"HTTP DELETE requests\", \n      \"description\" : \"Number of HTTP DELETE requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsOptions\", \n      \"name\" : \"HTTP OPTIONS requests\", \n      \"description\" : \"Number of HTTP OPTIONS requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"http\", \n      \"identifier\" : \"requestsOther\", \n      \"name\" : \"other HTTP requests\", \n      \"description\" : \"Number of other HTTP requests.\", \n      \"type\" : \"accumulated\", \n      \"units\" : \"number\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"identifier\" : \"uptime\", \n      \"name\" : \"Server Uptime\", \n      \"description\" : \"Number of seconds elapsed since server start.\", \n      \"type\" : \"current\", \n      \"units\" : \"seconds\" \n    }, \n    { \n      \"group\" : \"server\", \n      \"identifier\" : \"physicalMemory\", \n      \"name\" : \"Physical Memory\", \n      \"description\" : \"Physical memory in bytes.\", \n      \"type\" : \"current\", \n      \"units\" : \"bytes\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Description was returned successfully.
" + } + }, + "summary": " Statistics description", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/test": { + "post": { + "description": "free style json body\n\n
Executes the specified tests on the server and returns an object with the test results. The object has an attribute \"error\" which states whether any error occurred. The object also has an attribute \"passed\" which indicates which tests passed and which did not.
", + "parameters": [ + { + "description": "A JSON object containing an attribute tests which lists the files containing the test suites.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "summary": " Runs tests on server", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/time": { + "get": { + "description": "\n\n
The call returns an object with the attribute time. This contains the current system time as a Unix timestamp with microsecond precision.
", + "parameters": [], + "responses": { + "200": { + "description": "Time was returned successfully.
" + } + }, + "summary": " Return system time", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_admin/wal/flush": { + "put": { + "description": "\n\n
Flushes the write-ahead log. By flushing the currently active write-ahead logfile, the data in it can be transferred to collection journals and datafiles. This is useful to ensure that all data for a collection is present in the collection journals and datafiles, for example, when dumping the data of a collection.
", + "parameters": [ + { + "description": "Whether or not the operation should block until the not-yet synchronized data in the write-ahead log was synchronized to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not the operation should block until the data in the flushed log has been collected by the write-ahead log garbage collector. Note that setting this option to true might block for a long time if there are long-running transactions and the write-ahead log garbage collector cannot finish garbage collection.
", + "in": "query", + "name": "waitForCollector", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Flushes the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_admin/wal/properties": { + "get": { + "description": "\n\n
Retrieves the configuration of the write-ahead log. The result is a JSON object with the following attributes:
  • allowOversizeEntries: whether or not operations that are bigger than a single logfile can be executed and stored
  • logfileSize: the size of each write-ahead logfile
  • historicLogfiles: the maximum number of historic logfiles to keep
  • reserveLogfiles: the maximum number of reserve logfiles that ArangoDB allocates in the background
  • syncInterval: the interval for automatic synchronization of not-yet synchronized write-ahead log data (in milliseconds)
  • throttleWait: the maximum wait time that operations will wait before they get aborted in case of write-throttling (in milliseconds)
  • throttleWhenPending: the number of unprocessed garbage-collection operations that, when reached, will activate write-throttling. A value of 0 means that write-throttling will not be triggered.

Example:

shell> curl --dump - http://localhost:8529/_admin/wal/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"allowOversizeEntries\" : true, \n  \"logfileSize\" : 33554432, \n  \"historicLogfiles\" : 10, \n  \"reserveLogfiles\" : 1, \n  \"syncInterval\" : 100, \n  \"throttleWait\" : 15000, \n  \"throttleWhenPending\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Retrieves the configuration of the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + }, + "put": { + "description": "\n\n
Configures the behavior of the write-ahead log. The body of the request must be a JSON object with the following attributes:
  • allowOversizeEntries: whether or not operations that are bigger than a single logfile can be executed and stored
  • logfileSize: the size of each write-ahead logfile
  • historicLogfiles: the maximum number of historic logfiles to keep
  • reserveLogfiles: the maximum number of reserve logfiles that ArangoDB allocates in the background
  • throttleWait: the maximum wait time that operations will wait before they get aborted in case of write-throttling (in milliseconds)
  • throttleWhenPending: the number of unprocessed garbage-collection operations that, when reached, will activate write-throttling. A value of 0 means that write-throttling will not be triggered.
Specifying any of the above attributes is optional. Not specified attributes will be ignored and the configuration for them will not be modified.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_admin/wal/properties <<EOF\n{ \n  \"logfileSize\" : 33554432, \n  \"allowOversizeEntries\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"allowOversizeEntries\" : true, \n  \"logfileSize\" : 33554432, \n  \"historicLogfiles\" : 10, \n  \"reserveLogfiles\" : 1, \n  \"syncInterval\" : 100, \n  \"throttleWait\" : 15000, \n  \"throttleWhenPending\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Configures the write-ahead log", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_admin/wal/transactions": { + "get": { + "description": "\n\n
Returns information about the currently running transactions. The result is a JSON object with the following attributes:
  • runningTransactions: number of currently running transactions
  • minLastCollected: minimum id of the last collected logfile (at the start of each running transaction). This is null if no transaction is running.
  • minLastSealed: minimum id of the last sealed logfile (at the start of each running transaction). This is null if no transaction is running.

Example:

shell> curl --dump - http://localhost:8529/_admin/wal/transactions\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"runningTransactions\" : 0, \n  \"minLastCollected\" : null, \n  \"minLastSealed\" : null, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the operation succeeds.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.

" + } + }, + "summary": " Returns information about the currently running transactions", + "tags": [ + "wal" + ], + "x-examples": [], + "x-filename": "wal - js/actions/_admin/wal/app.js" + } + }, + "/_api/aqlfunction": { + "get": { + "description": "\n\nReturns all registered AQL user functions.
The call will return a JSON array with all user functions found. Each user function will at least have the following attributes:
  • name: The fully qualified name of the user function
  • code: A string representation of the function body

Example:

shell> curl --dump - http://localhost:8529/_api/aqlfunction\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"name\" : \"myfunctions::temperature::celsiustofahrenheit\", \n    \"code\" : \"function (celsius) { return celsius * 1.8 + 32; }\" \n  } \n]\n

\n
", + "parameters": [ + { + "description": "Returns all registered AQL user functions from namespace namespace.
", + "in": "query", + "name": "namespace", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "if success HTTP 200 is returned.
" + } + }, + "summary": " Return registered AQL user functions", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • isDeterministic: an optional boolean value to indicate that the function results are fully deterministic (function return value solely depends on the input value and return value is the same for repeated calls with same input). The isDeterministic attribute is currently not used but may be used later for optimisations.
  • code: a string representation of the function body.
  • name: the fully qualified name of the user functions.
\n\n
In case of success, the returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/aqlfunction <<EOF\n{ \n  \"name\" : \"myfunctions::temperature::celsiustofahrenheit\", \n  \"code\" : \"function (celsius) { return celsius * 1.8 + 32; }\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_aqlfunction" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the function already existed and was replaced by the call, the server will respond with HTTP 200.
" + }, + "201": { + "description": "If the function can be registered by the server, the server will respond with HTTP 201.
" + }, + "400": { + "description": "If the JSON representation is malformed or mandatory data is missing from the request, the server will respond with HTTP 400.
" + } + }, + "summary": " Create AQL user function", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/aqlfunction/{name}": { + "delete": { + "description": "\n\n
Removes an existing AQL user function, identified by name.
In case of success, the returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example: deletes a function:

shell> curl -X DELETE --dump - http://localhost:8529/_api/aqlfunction/square::x::y\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: function not found:

shell> curl -X DELETE --dump - http://localhost:8529/_api/aqlfunction/myfunction::x::y\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1582, \n  \"errorMessage\" : \"user function '%s()' not found\" \n}\n

\n
", + "parameters": [ + { + "description": "the name of the AQL user function.
", + "format": "string", + "in": "path", + "name": "name", + "required": true, + "type": "string" + }, + { + "description": "If set to true, then the function name provided in name is treated as a namespace prefix, and all functions in the specified namespace will be deleted. If set to false, the function name provided in name must be fully qualified, including any namespaces.
", + "in": "query", + "name": "group", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the function can be removed by the server, the server will respond with HTTP 200.
" + }, + "400": { + "description": "If the user function name is malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "If the specified user user function does not exist, the server will respond with HTTP 404.
" + } + }, + "summary": " Remove existing AQL user function", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/batch": { + "post": { + "description": "free style json body\n\nExecutes a batch request. A batch request can contain any number of other requests that can be sent to ArangoDB in isolation. The benefit of using batch requests is that batching requests requires less client/server roundtrips than when sending isolated requests.
All parts of a batch request are executed serially on the server. The server will return the results of all parts in a single response when all parts are finished.
Technically, a batch request is a multipart HTTP request, with content-type `multipart/form-data`. A batch request consists of an envelope and the individual batch part actions. Batch part actions are \"regular\" HTTP requests, including full header and an optional body. Multiple batch parts are separated by a boundary identifier. The boundary identifier is declared in the batch envelope. The MIME content-type for each individual batch part must be `application/x-arango-batchpart`.
Please note that when constructing the individual batch parts, you must use CRLF (`\\r\\n`) as the line terminator as in regular HTTP messages.
The response sent by the server will be an `HTTP 200` response, with an optional error summary header `x-arango-errors`. This header contains the number of batch part operations that failed with an HTTP error code of at least 400. This header is only present in the response if the number of errors is greater than zero.
The response sent by the server is a multipart response, too. It contains the individual HTTP responses for all batch parts, including the full HTTP result header (with status code and other potential headers) and an optional result body. The individual batch parts in the result are separated using the same boundary value as specified in the request.
The order of batch parts in the response will be the same as in the original client request. Clients can additionally use the `Content-Id` MIME header in a batch part to define an individual id for each batch part. The server will return this id in the batch part responses, too.

Example: Sending a batch request with five batch parts:
  • GET /_api/version
  • DELETE /_api/collection/products
  • POST /_api/collection/products
  • GET /_api/collection/products/figures
  • DELETE /_api/collection/products
The boundary (`SomeBoundaryValue`) is passed to the server in the `Content-Type` HTTP header. Please note that the reply shown is not fully accurate.


shell> curl -X POST --header 'Content-Type: multipart/form-data; boundary=SomeBoundaryValue' --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: myId1\r\n\r\nGET /_api/version HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: myId2\r\n\r\nDELETE /_api/collection/products HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: someId\r\n\r\nPOST /_api/collection/products HTTP/1.1\r\n\r\n{ \"name\": \"products\" }\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: nextId\r\n\r\nGET /_api/collection/products/figures HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\nContent-Id: otherId\r\n\r\nDELETE /_api/collection/products HTTP/1.1\r\n--SomeBoundaryValue--\r\n\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: multipart/form-data; boundary=SomeBoundaryValue\nx-arango-errors: 1\n\n\"--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: myId1\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 43\\r\\n\\r\\n{\\\"server\\\":\\\"arango\\\",\\\"version\\\":\\\"2.7.0-devel\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: myId2\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 88\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'products'\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: someId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nLocation: /_db/_system/_api/collection/products\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 
137\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"name\\\":\\\"products\\\",\\\"waitForSync\\\":false,\\\"isVolatile\\\":false,\\\"isSystem\\\":false,\\\"status\\\":3,\\\"type\\\":2,\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: nextId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nLocation: /_db/_system/_api/collection/products/figures\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 635\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"name\\\":\\\"products\\\",\\\"isSystem\\\":false,\\\"doCompact\\\":true,\\\"isVolatile\\\":false,\\\"journalSize\\\":1048576,\\\"keyOptions\\\":{\\\"type\\\":\\\"traditional\\\",\\\"allowUserKeys\\\":true},\\\"waitForSync\\\":false,\\\"indexBuckets\\\":8,\\\"count\\\":0,\\\"figures\\\":{\\\"alive\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"dead\\\":{\\\"count\\\":0,\\\"size\\\":0,\\\"deletion\\\":0},\\\"datafiles\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"journals\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"compactors\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"shapefiles\\\":{\\\"count\\\":0,\\\"fileSize\\\":0},\\\"shapes\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"attributes\\\":{\\\"count\\\":0,\\\"size\\\":0},\\\"indexes\\\":{\\\"count\\\":1,\\\"size\\\":16064},\\\"lastTick\\\":\\\"0\\\",\\\"uncollectedLogfileEntries\\\":0},\\\"status\\\":3,\\\"type\\\":2,\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\nContent-Id: otherId\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 43\\r\\n\\r\\n{\\\"id\\\":\\\"619502023\\\",\\\"error\\\":false,\\\"code\\\":200}\\r\\n--SomeBoundaryValue--\"\n

\n
Example: Sending a batch request, setting the boundary implicitly (the server will in this case try to find the boundary at the beginning of the request body).

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/batch <<EOF\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\n\r\nDELETE /_api/collection/notexisting1 HTTP/1.1\r\n\r\n--SomeBoundaryValue\r\nContent-Type: application/x-arango-batchpart\r\n\r\nDELETE /_api/collection/notexisting2 HTTP/1.1\r\n--SomeBoundaryValue--\r\n\nEOF\n\nHTTP/1.1 200 OK\nx-arango-errors: 2\n\n\"--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 92\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'notexisting1'\\\"}\\r\\n--SomeBoundaryValue\\r\\nContent-Type: application/x-arango-batchpart\\r\\n\\r\\nHTTP/1.1 404 Not Found\\r\\nContent-Type: application/json; charset=utf-8\\r\\nContent-Length: 92\\r\\n\\r\\n{\\\"error\\\":true,\\\"code\\\":404,\\\"errorNum\\\":1203,\\\"errorMessage\\\":\\\"unknown collection 'notexisting2'\\\"}\\r\\n--SomeBoundaryValue--\"\n

\n
", + "parameters": [ + { + "description": "The multipart batch request, consisting of the envelope and the individual batch parts.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "is returned if the batch was received successfully. HTTP 200 is returned even if one or multiple batch part actions failed.
" + }, + "400": { + "description": "is returned if the batch envelope is malformed or incorrectly formatted. This code will also be returned if the content-type of the overall batch request or the individual MIME parts is not as expected.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": "executes a batch request", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/collection": { + "get": { + "description": "\n\nReturns an object with an attribute collections containing an array of all collection descriptions. The same information is also available in the names as an object with the collection names as keys.
By providing the optional URL parameter excludeSystem with a value of true, all system collections will be excluded from the response.

Example: Return information about all collections:

shell> curl --dump - http://localhost:8529/_api/collection\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"id\" : \"6216135\", \n      \"name\" : \"_queues\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"5757383\", \n      \"name\" : \"_configuration\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"22206919\", \n      \"name\" : \"animals\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"14145991\", \n      \"name\" : \"_sessions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2087367\", \n      \"name\" : \"_graphs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2480583\", \n      \"name\" : \"_cluster_kickstarter_plans\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"252359\", \n      \"name\" : \"_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"14866887\", \n      \"name\" : \"_system_users_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4577735\", \n      \"name\" : \"_statisticsRaw\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2349511\", \n      \"name\" : \"_routing\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"6347207\", \n      \"name\" : \"_jobs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"7199175\", \n      \"name\" : \"_apps\", \n      \"isSystem\" : true, \n      
\"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4970951\", \n      \"name\" : \"_statistics\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"5364167\", \n      \"name\" : \"_statistics15\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"21354951\", \n      \"name\" : \"demo\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"4446663\", \n      \"name\" : \"_aqlfunctions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    { \n      \"id\" : \"2218439\", \n      \"name\" : \"_modules\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    } \n  ], \n  \"names\" : { \n    \"_queues\" : { \n      \"id\" : \"6216135\", \n      \"name\" : \"_queues\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_configuration\" : { \n      \"id\" : \"5757383\", \n      \"name\" : \"_configuration\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"animals\" : { \n      \"id\" : \"22206919\", \n      \"name\" : \"animals\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_sessions\" : { \n      \"id\" : \"14145991\", \n      \"name\" : \"_sessions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_graphs\" : { \n      \"id\" : \"2087367\", \n      \"name\" : \"_graphs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_cluster_kickstarter_plans\" : { \n      \"id\" : \"2480583\", \n      \"name\" : \"_cluster_kickstarter_plans\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_users\" : { \n      \"id\" : \"252359\", \n      \"name\" : \"_users\", \n     
 \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_system_users_users\" : { \n      \"id\" : \"14866887\", \n      \"name\" : \"_system_users_users\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statisticsRaw\" : { \n      \"id\" : \"4577735\", \n      \"name\" : \"_statisticsRaw\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_routing\" : { \n      \"id\" : \"2349511\", \n      \"name\" : \"_routing\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_jobs\" : { \n      \"id\" : \"6347207\", \n      \"name\" : \"_jobs\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_apps\" : { \n      \"id\" : \"7199175\", \n      \"name\" : \"_apps\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statistics\" : { \n      \"id\" : \"4970951\", \n      \"name\" : \"_statistics\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_statistics15\" : { \n      \"id\" : \"5364167\", \n      \"name\" : \"_statistics15\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"demo\" : { \n      \"id\" : \"21354951\", \n      \"name\" : \"demo\", \n      \"isSystem\" : false, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_aqlfunctions\" : { \n      \"id\" : \"4446663\", \n      \"name\" : \"_aqlfunctions\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    }, \n    \"_modules\" : { \n      \"id\" : \"2218439\", \n      \"name\" : \"_modules\", \n      \"isSystem\" : true, \n      \"status\" : 3, \n      \"type\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "Whether or not system collections should be excluded from the result.
", + "in": "query", + "name": "excludeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "The list of collections
" + } + }, + "summary": "reads all collections", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • journalSize: The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
  • keyOptions: additional options for key generation. If specified, then keyOptions should be a JSON array containing the following attributes:
    • allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator will solely be responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
    • type: specifies the type of the key generator. The currently available generators are traditional and autoincrement.
    • increment: increment value for autoincrement key generator. Not used for other key generator types.
    • offset: Initial offset value for autoincrement key generator. Not used for other key generator types.
  • name: The name of the collection.
  • waitForSync: If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
  • doCompact: whether or not the collection will be compacted (default is true)
  • isVolatile: If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
  • shardKeys: (The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
  • numberOfShards: (The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
  • isSystem: If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
  • type: (The default is 2): the type of the collection to create. The following values for type are valid:
    • 2: document collection
    • 3: edges collection
  • indexBuckets: The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
    For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
\n\nCreates a new collection with a given name. The request must contain an object with the following attributes.


Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionBasics\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionBasics\n\n{ \n  \"id\" : \"619895239\", \n  \"name\" : \"testCollectionBasics\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionEdges\", \n  \"type\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionEdges\n\n{ \n  \"id\" : \"620026311\", \n  \"name\" : \"testCollectionEdges\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 3, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \"testCollectionUsers\", \n  \"keyOptions\" : { \n    \"type\" : \"autoincrement\", \n    \"increment\" : 5, \n    \"allowUserKeys\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/testCollectionUsers\n\n{ \n  \"id\" : \"620288455\", \n  \"name\" : \"testCollectionUsers\", \n  \"waitForSync\" : false, \n  \"isVolatile\" : false, \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_collection" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.

" + } + }, + "summary": " Create collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}": { + "delete": { + "description": "\n\nDrops the collection identified by collection-name.
If the collection was successfully dropped, an object is returned with the following attributes:
  • error: false
  • id: The identifier of the dropped collection.

Example: Using an identifier:

shell> curl -X DELETE --dump - http://localhost:8529/_api/collection/620485063\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620485063\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a name:

shell> curl -X DELETE --dump - http://localhost:8529/_api/collection/products1\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620681671\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection to drop.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Drops collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "get": { + "description": "\n\nThe result is an object describing the collection with the following attributes:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • status: The status of the collection as number. - 1: new born collection - 2: unloaded - 3: loaded - 4: in the process of being unloaded - 5: deleted - 6: loading
Every other status indicates a corrupted collection.
  • type: The type of the collection as number. - 2: document collection (normal case) - 3: edges collection
  • isSystem: If true then the collection is a system collection.
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return information about a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/checksum": { + "get": { + "description": "\n\nWill calculate a checksum of the meta-data (keys and optionally revision ids) and optionally the document data in the collection.
The checksum can be used to compare if two collections on different ArangoDB instances contain the same contents. The current revision of the collection is returned too so one can make sure the checksums are calculated for the same state of data.
By default, the checksum will only be calculated on the _key system attribute of the documents contained in the collection. For edge collections, the system attributes _from and _to will also be included in the calculation.
By setting the optional URL parameter withRevisions to true, then revision ids (_rev system attributes) are included in the checksumming.
By providing the optional URL parameter withData with a value of true, the user-defined document attributes will be included in the calculation too. Note: Including user-defined attributes will make the checksumming slower.
The response is a JSON object with the following attributes:
  • checksum: The calculated checksum as a number.
  • revision: The collection revision id as a string.
Note: this method is not available in a cluster.

Example: Retrieving the checksum of a collection:

shell> curl --dump - http://localhost:8529/_api/collection/products/checksum\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"620878279\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"checksum\" : 2335626498, \n  \"revision\" : \"621205959\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the checksum of a collection including the collection data, but not the revisions:

shell> curl --dump - http://localhost:8529/_api/collection/products/checksum?withRevisions=false&withData=true\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"621468103\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"checksum\" : 1042110547, \n  \"revision\" : \"621795783\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "description": "Whether or not to include document revision ids in the checksum calculation.
", + "in": "query", + "name": "withRevisions", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to include document body data in the checksum calculation.
", + "in": "query", + "name": "withData", + "required": false, + "type": "boolean" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return checksum for the collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/count": { + "get": { + "description": "\n\nIn addition to the above, the result also contains the number of documents. Note that this will always load the collection into memory.
  • count: The number of documents inside the collection.

Example: Requesting the number of documents:

shell> curl --dump - http://localhost:8529/_api/collection/products/count\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/count\n\n{ \n  \"id\" : \"622057927\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"count\" : 100, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return number of documents in a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/figures": { + "get": { + "description": "\n\nIn addition to the above, the result also contains the number of documents and additional statistical information about the collection. Note : This will always load the collection into memory.
Note: collection data that are stored in the write-ahead log only are not reported in the results. When the write-ahead log is collected, documents might be added to journals and datafiles of the collection, which may modify the figures of the collection.
Additionally, the filesizes of collection and index parameter JSON files are not reported. These files should normally have a size of a few bytes each. Please also note that the fileSize values are reported in bytes and reflect the logical file sizes. Some filesystems may use optimisations (e.g. sparse files) so that the actual physical file size is somewhat different. Directories and sub-directories may also require space in the file system, but this space is not reported in the fileSize results.
That means that the figures reported do not reflect the actual disk usage of the collection with 100% accuracy. The actual disk usage of a collection is normally slightly higher than the sum of the reported fileSize values. Still the sum of the fileSize values can still be used as a lower bound approximation of the disk usage.
**A json document with these Properties is returned:**
  • count: The number of documents currently present in the collection.
  • journalSize: The maximal size of a journal or datafile in bytes.
  • figures: metrics of the collection
    • datafiles: Metrics regarding the datafiles
      • count: The number of datafiles.
      • fileSize: The total filesize of datafiles (in bytes).
    • uncollectedLogfileEntries: The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
    • compactors:
      • count: The number of compactor files.
      • fileSize: The total filesize of all compactor files (in bytes).
    • dead: the items waiting to be swept away by the cleaner
      • count: The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure.
      • deletion: The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure.
      • size: The total size in bytes used by all dead documents.
    • indexes:
      • count: The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
      • size: The total memory allocated for indexes in bytes.
    • shapes:
      • count: The total number of shapes used in the collection. This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size of all shapes (in bytes). This includes shapes that are not in use anymore. Shapes that are contained in the write-ahead log only are not reported in this figure.
    • alive: the currently active figures
      • count: The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
    • attributes:
      • count: The total number of attributes used in the collection. Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
      • size: The total size of the attribute data (in bytes). Note: the value includes data of attributes that are not in use anymore. Attributes that are contained in the write-ahead log only are not reported in this figure.
    • shapefiles: deprecated
      • count: The number of shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 in ArangoDB 2.0 and higher.
      • fileSize: The total filesize of the shape files. This value is deprecated and kept for compatibility reasons only. The value will always be 0 in ArangoDB 2.0 and higher.
    • journals: Metrics regarding the journal files
      • count: The number of journal files.
      • fileSize: The total filesize of all journal files (in bytes).
    • maxTick: The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.

Example: Using an identifier and requesting the figures of the collection:

shell> curl --dump - http://localhost:8529/_api/collection/products/figures\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/figures\n\n{ \n  \"id\" : \"642111943\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : false, \n  \"indexBuckets\" : 8, \n  \"count\" : 1, \n  \"figures\" : { \n    \"alive\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"dead\" : { \n      \"count\" : 0, \n      \"size\" : 0, \n      \"deletion\" : 0 \n    }, \n    \"datafiles\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"journals\" : { \n      \"count\" : 1, \n      \"fileSize\" : 1048576 \n    }, \n    \"compactors\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"shapefiles\" : { \n      \"count\" : 0, \n      \"fileSize\" : 0 \n    }, \n    \"shapes\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"attributes\" : { \n      \"count\" : 0, \n      \"size\" : 0 \n    }, \n    \"indexes\" : { \n      \"count\" : 1, \n      \"size\" : 16120 \n    }, \n    \"lastTick\" : \"642505159\", \n    \"uncollectedLogfileEntries\" : 1 \n  }, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Returns information about the collection:
", + "schema": { + "$ref": "#/definitions/JSA_get_api_collection_figures_rc_200" + }, + "x-description-offset": 1458 + }, + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return statistics for a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/load": { + "put": { + "description": "\n\nLoads a collection into memory. Returns the collection on success.
The request body object might optionally contain the following attribute:
  • count: If set, this controls whether the return value should include the number of documents in the collection. Setting count to false may speed up loading a collection. The default value for count is true.
On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • count: The number of documents inside the collection. This is only returned if the count input parameter is set to true or has not been specified.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/load\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644078023\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"count\" : 0, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Load collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/properties": { + "get": { + "description": "\n\nIn addition to the above, the result will always contain the waitForSync, doCompact, journalSize, and isVolatile attributes. This is achieved by forcing a load of the underlying collection.
  • waitForSync: If true then creating, changing or removing documents will wait until the data has been synchronized to disk.
  • doCompact: Whether or not the collection will be compacted.
  • journalSize: The maximal size setting for journals / datafiles in bytes.
  • keyOptions: JSON object which contains key generation options: - type: specifies the type of the key generator. The currently available generators are traditional and autoincrement. - allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator is solely responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
  • isVolatile: If true then the collection data will be kept in memory only and ArangoDB will not write or sync the data to disk.
In a cluster setup, the result will also contain the following attributes:
  • numberOfShards: the number of shards of the collection.
  • shardKeys: contains the names of document attributes that are used to determine the target shard for documents.

Example: Using an identifier:

shell> curl --dump - http://localhost:8529/_api/collection/643422663/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/properties\n\n{ \n  \"id\" : \"643422663\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a name:

shell> curl --dump - http://localhost:8529/_api/collection/products/properties\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nlocation: /_db/_system/_api/collection/products/properties\n\n{ \n  \"id\" : \"643619271\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Read properties of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + "put": { + "description": "\n\nChanges the properties of a collection. Expects an object with the attribute(s)
  • waitForSync: If true then creating or changing a document will wait until the data has been synchronized to disk.
  • journalSize: The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected.
On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • waitForSync: The new value.
  • journalSize: The new value.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.
  • isVolatile: If true then the collection data will be kept in memory only and ArangoDB will not write or sync the data to disk.
  • doCompact: Whether or not the collection will be compacted.
  • keyOptions: JSON object which contains key generation options: - type: specifies the type of the key generator. The currently available generators are traditional and autoincrement. - allowUserKeys: if set to true, then it is allowed to supply own key values in the _key attribute of a document. If set to false, then the key generator is solely responsible for generating keys and supplying own key values in the _key attribute of documents is considered an error.
Note: some other collection properties, such as type, isVolatile, numberOfShards or shardKeys cannot be changed once a collection is created.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/properties <<EOF\n{ \n  \"waitForSync\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644340167\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"doCompact\" : true, \n  \"isVolatile\" : false, \n  \"journalSize\" : 1048576, \n  \"keyOptions\" : { \n    \"type\" : \"traditional\", \n    \"allowUserKeys\" : true \n  }, \n  \"waitForSync\" : true, \n  \"indexBuckets\" : 8, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Change properties of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/rename": { + "put": { + "description": "\n\nRenames a collection. Expects an object with the attribute(s)
  • name: The new name.
It returns an object with the attributes
  • id: The identifier of the collection.
  • name: The new name of the collection.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products1/rename <<EOF\n{ \n  \"name\" : \"newname\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644602311\", \n  \"name\" : \"newname\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection to rename.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned." + } + }, + "summary": " Rename collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/revision": { + "get": { + "description": "\n\nIn addition to the above, the result will also contain the collection's revision id. The revision id is a server-generated string that clients can use to check whether data in a collection has changed since the last revision check.
  • revision: The collection revision id as a string.

Example: Retrieving the revision of a collection

shell> curl --dump - http://localhost:8529/_api/collection/products/revision\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"643815879\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"revision\" : \"0\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Return collection revision id", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/rotate": { + "put": { + "description": "\n\nRotates the journal of a collection. The current journal of the collection will be closed and made a read-only datafile. The purpose of the rotate method is to make the data in the file available for compaction (compaction is only performed for read-only datafiles, and not for journals).
Saving new data in the collection subsequently will create a new journal file automatically if there is no current journal.
It returns an object with the attributes
  • result: will be true if rotation succeeded
Note: This method is not available in a cluster.

Example: Rotating the journal:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF\n{ \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Rotating if no journal exists:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/collection/products/rotate <<EOF\n{ \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1105, \n  \"errorMessage\" : \"could not rotate journal: no journal\" \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection currently has no journal, HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Rotate journal of a collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/truncate": { + "put": { + "description": "\n\nRemoves all documents from the collection, but leaves the indexes intact.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/truncate\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"644864455\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 3, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Truncate collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/collection/{collection-name}/unload": { + "put": { + "description": "\n\nRemoves a collection from memory. This call does not delete any documents. You can use the collection afterwards; in which case it will be loaded into memory, again. On success an object with the following attributes is returned:
  • id: The identifier of the collection.
  • name: The name of the collection.
  • status: The status of the collection as number.
  • type: The collection type. Valid types are: - 2: document collection - 3: edges collection
  • isSystem: If true then the collection is a system collection.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/collection/products/unload\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"645126599\", \n  \"name\" : \"products\", \n  \"isSystem\" : false, \n  \"status\" : 2, \n  \"type\" : 2, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "
", + "format": "string", + "in": "path", + "name": "collection-name", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "If the collection-name is missing, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Unload collection", + "tags": [ + "Collections" + ], + "x-examples": [], + "x-filename": "Collections - js/actions/_api/collection/app.js" + } + }, + "/_api/cursor": { + "post": { + "description": "**A json post document with these Properties is required:**
  • count: indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result. Calculating the \"count\" attribute might in the future have a performance impact for some queries so this option is turned off by default, and \"count\" is only returned when requested.
  • ttl: The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
  • batchSize: maximum number of result documents to be transferred from the server to the client in one roundtrip. If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
  • cache: flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup will be skipped for the query. If set to true, it will lead to the query cache being checked for the query if the query cache mode is either on or demand.
  • bindVars: list of bind parameter objects. of type object
  • query: contains the query string to be executed
  • options: key/value object with extra options for the query.
    • profile: if set to true, then the additional query profiling information will be returned in the extra.stats return attribute if the query result is not served from the query cache.
    • optimizer.rules: a list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules. of type string
    • fullCount: if set to true and the query contains a LIMIT clause, then the result will contain an extra attribute extra with a sub-attribute fullCount. This sub-attribute will contain the number of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and thus make queries run longer. Note that the fullCount sub-attribute will only be present in the result if the query has a LIMIT clause and the LIMIT clause is actually used in the query.
    • maxPlans: limits the maximum number of plans that are created by the AQL query optimizer.
\n\nThe query details include the query string plus optional query options and bind parameters. These values need to be passed in a JSON representation in the body of the POST request.
**A json document with these Properties is returned:**
  • count: the total number of result documents available (only available if the query was executed with the count attribute set)
  • code: the HTTP status code
  • extra: an optional JSON object with extra information about the query result contained in its stats sub-attribute. For data-modification queries, the extra.stats sub-attribute will contain the number of modified documents and the number of documents that could not be modified due to an error (if ignoreErrors query option is specified)
  • cached: a boolean flag indicating whether the query result was served from the query cache or not. If the query result is served from the query cache, the extra return attribute will not contain any stats sub-attribute and no profile sub-attribute.
  • hasMore: A boolean indicator whether there are more results available for the cursor on the server
  • result: an array of result documents (might be empty if query has no results); each result document is an anonymous json object
  • error: A flag to indicate that an error occurred (false in this case)
  • id: id of temporary cursor created on the server (optional, see above)
  • errorMessage: a descriptive error message
    If the query specification is complete, the server will process the query. If an error occurs during query processing, the server will respond with HTTP 400. Again, the body of the response will contain details about the error.
    A list of query errors can be found here (../ArangoErrors/README.md).

  • errorNum: the server error number
  • code: the HTTP status code
  • error: boolean flag to indicate that an error occurred (true in this case)

Example: Execute a query and extract the result in a single go

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 2 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello2\" : \"world1\", \n      \"_id\" : \"products/648862151\", \n      \"_rev\" : \"648862151\", \n      \"_key\" : \"648862151\" \n    }, \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/648534471\", \n      \"_rev\" : \"648534471\", \n      \"_key\" : \"648534471\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 2, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a query and extract a part of the result

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/647223751\", \n      \"_rev\" : \"647223751\", \n      \"_key\" : \"647223751\" \n    }, \n    { \n      \"hello5\" : \"world1\", \n      \"_id\" : \"products/647879111\", \n      \"_rev\" : \"647879111\", \n      \"_key\" : \"647879111\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"648075719\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Using the query option \"fullCount\"

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..1000 FILTER i > 500 LIMIT 10 RETURN i\", \n  \"count\" : true, \n  \"options\" : { \n    \"fullCount\" : true \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    501, \n    502, \n    503, \n    504, \n    505, \n    506, \n    507, \n    508, \n    509, \n    510 \n  ], \n  \"hasMore\" : false, \n  \"count\" : 10, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 500, \n      \"fullCount\" : 500 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Enabling and disabling optimizer rules

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 LET a = 1 LET b = 2 FILTER a + b == 3 RETURN i\", \n  \"count\" : true, \n  \"options\" : { \n    \"maxPlans\" : 1, \n    \"optimizer\" : { \n      \"rules\" : [ \n        \"-all\", \n        \"+remove-unnecessary-filters\" \n      ] \n    } \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    1, \n    2, \n    3, \n    4, \n    5, \n    6, \n    7, \n    8, \n    9, \n    10 \n  ], \n  \"hasMore\" : false, \n  \"count\" : 10, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a data-modification query and retrieve the number of modified documents

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products REMOVE p IN products\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ ], \n  \"hasMore\" : false, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 2, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 2, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Execute a data-modification query with option ignoreErrors

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"REMOVE 'bar' IN products OPTIONS { ignoreErrors: true }\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ ], \n  \"hasMore\" : false, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 1, \n      \"scannedFull\" : 0, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Bad query - Missing body

shell> curl -X POST --dump - http://localhost:8529/_api/cursor\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting atom, got end-of-file\", \n  \"code\" : 400, \n  \"errorNum\" : 600 \n}\n

\n
Example: Bad query - Unknown collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR u IN unknowncoll LIMIT 2 RETURN u\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection not found (unknowncoll)\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Bad query - Execute a data-modification query that attempts to remove a non-existing document

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"REMOVE 'foo' IN products\" \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found (while executing)\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor" + }, + "x-description-offset": 59 + } + ], + "produces": [ + "application/json" + ], + "responses": { + "201": { + "description": "is returned if the result set can be created by the server.
", + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor_rc_201" + }, + "x-description-offset": 300 + }, + "400": { + "description": "is returned if the JSON representation is malformed or the query specification is missing from the request.
If the JSON representation is malformed or the query specification is missing from the request, the server will respond with HTTP 400.
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
", + "schema": { + "$ref": "#/definitions/JSF_post_api_cursor_rc_400" + }, + "x-description-offset": 354 + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + } + }, + "summary": " Create cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + } + }, + "/_api/cursor/{cursor-identifier}": { + "delete": { + "description": "\n\nDeletes the cursor and frees the resources associated with it.
The cursor will automatically be destroyed on the server when the client has retrieved all documents from it. The client can also explicitly destroy the cursor at any earlier time using an HTTP DELETE request. The cursor id must be included as part of the URL.
Note: the server will also destroy abandoned cursors automatically after a certain server-controlled timeout to avoid resource leakage.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/650172871\", \n      \"_rev\" : \"650172871\", \n      \"_key\" : \"650172871\" \n    }, \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/649517511\", \n      \"_rev\" : \"649517511\", \n      \"_key\" : \"649517511\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"651024839\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/cursor/651024839\n\n

\n
", + "parameters": [ + { + "description": "The id of the cursor
", + "format": "string", + "in": "path", + "name": "cursor-identifier", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "is returned if the server is aware of the cursor.
" + }, + "404": { + "description": "is returned if the server is not aware of the cursor. It is also returned if a cursor is used after it has been destroyed.
" + } + }, + "summary": " Delete cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + }, + "put": { + "description": "\n\n
If the cursor is still alive, returns an object with the following attributes:
  • id: the cursor-identifier
  • result: a list of documents for the current batch
  • hasMore: false if this was the last batch
  • count: if present the total number of elements
Note that even if hasMore returns true, the next call might still return no documents. If, however, hasMore is false, then the cursor is exhausted. Once the hasMore attribute has a value of false, the client can stop.

Example: Valid request for next batch

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR p IN products LIMIT 5 RETURN p\", \n  \"count\" : true, \n  \"batchSize\" : 2 \n}\nEOF\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/cursor/655481287\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"hello1\" : \"world1\", \n      \"_id\" : \"products/653973959\", \n      \"_rev\" : \"653973959\", \n      \"_key\" : \"653973959\" \n    }, \n    { \n      \"hello3\" : \"world1\", \n      \"_id\" : \"products/654629319\", \n      \"_rev\" : \"654629319\", \n      \"_key\" : \"654629319\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"655481287\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Missing identifier

shell> curl -X PUT --dump - http://localhost:8529/_api/cursor\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting PUT /_api/cursor/<cursor-id>\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
Example: Unknown identifier

shell> curl -X PUT --dump - http://localhost:8529/_api/cursor/123123\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cursor not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1600 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the cursor
", + "format": "string", + "in": "path", + "name": "cursor-identifier", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 in case of success.
" + }, + "400": { + "description": "If the cursor identifier is omitted, the server will respond with HTTP 404.
" + }, + "404": { + "description": "If no cursor with the specified identifier can be found, the server will respond with HTTP 404.
" + } + }, + "summary": " Read next batch from cursor", + "tags": [ + "Cursors" + ], + "x-examples": [], + "x-filename": "Cursors - arangod/RestHandler/RestCursorHandler.cpp" + } + }, + "/_api/database": { + "get": { + "description": "\n\nRetrieves the list of all existing databases
Note: retrieving the list of databases is only possible from within the _system database.

Example:

shell> curl --dump - http://localhost:8529/_api/database\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    \"_system\" \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the list of database was compiled successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + } + }, + "summary": " List of databases", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • username: The user name as a string. If users is not specified or does not contain any users, a default user root will be created with an empty string password. This ensures that the new database will be accessible after it is created.
  • users: Has to be a list of user objects to initially create for the new database. Each user object can contain the following attributes: \n
    • username: Loginname of the user to be created
    • passwd: Password for the user
    • active: if False the user won't be able to log into the database.
  • extra: A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
  • passwd: The user password as a string. If not specified, it will default to an empty string.
  • active: A Flag indicating whether the user account should be activated or not. The default value is true.
  • name: Has to contain a valid database name.
\n\nCreates a new database
The response is a JSON object with the attribute result set to true.
Note: creating a new database is only possible from within the _system database.

Example: Creating a database named example.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"example\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a database named mydb with two users.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/database <<EOF\n{ \n  \"name\" : \"mydb\", \n  \"users\" : [ \n    { \n      \"username\" : \"admin\", \n      \"passwd\" : \"secret\", \n      \"active\" : true \n    }, \n    { \n      \"username\" : \"tester\", \n      \"passwd\" : \"test001\", \n      \"active\" : false \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_get_api_database_new" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the database was created successfully.
" + }, + "400": { + "description": "is returned if the request parameters are invalid or if a database with the specified name already exists.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + }, + "409": { + "description": "is returned if a database with the specified name already exists.
" + } + }, + "summary": " Create database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/current": { + "get": { + "description": "\n\nRetrieves information about the current database
The response is a JSON object with the following attributes:
  • name: the name of the current database
  • id: the id of the current database
  • path: the filesystem path of the current database
  • isSystem: whether or not the current database is the _system database

Example:

shell> curl --dump - http://localhost:8529/_api/database/current\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"name\" : \"_system\", \n    \"id\" : \"121287\", \n    \"path\" : \"/tmp/vocdir.2239/databases/database-121287\", \n    \"isSystem\" : true \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the information was retrieved successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + }, + "404": { + "description": "is returned if the database could not be found.
" + } + }, + "summary": " Information of the database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/user": { + "get": { + "description": "\n\nRetrieves the list of all databases the current user can access without specifying a different username or password.

Example:

shell> curl --dump - http://localhost:8529/_api/database/user\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    \"_system\" \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the list of database was compiled successfully.
" + }, + "400": { + "description": "is returned if the request is invalid.
" + } + }, + "summary": " List of accessible databases ", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/database/{database-name}": { + "delete": { + "description": "\n\nDrops the database along with all data stored in it.
Note: dropping a database is only possible from within the _system database. The _system database itself cannot be dropped.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/database/example\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the database
", + "format": "string", + "in": "path", + "name": "database-name", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the database was dropped successfully.
" + }, + "400": { + "description": "is returned if the request is malformed.
" + }, + "403": { + "description": "is returned if the request was not executed in the _system database.
" + }, + "404": { + "description": "is returned if the database could not be found.
" + } + }, + "summary": " Drop database", + "tags": [ + "Database" + ], + "x-examples": [], + "x-filename": "Database - js/actions/api-database.js" + } + }, + "/_api/document": { + "get": { + "description": "\n\nReturns an array of all keys, ids, or URI paths for all documents in the collection identified by collection. The type of the result array is determined by the type attribute.
Note that the results have no defined order and thus the order should not be relied on.

Example: Return all document paths

shell> curl --dump - http://localhost:8529/_api/document/?collection=products\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    \"/_db/_system/_api/document/products/711580103\", \n    \"/_db/_system/_api/document/products/712235463\", \n    \"/_db/_system/_api/document/products/711907783\" \n  ] \n}\n

\n
Example: Return all document keys

shell> curl --dump - http://localhost:8529/_api/document/?collection=products&type=key\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    \"710662599\", \n    \"710334919\", \n    \"710990279\" \n  ] \n}\n

\n
Example: Collection does not exist

shell> curl --dump - http://localhost:8529/_api/document/?collection=doesnotexist\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'doesnotexist' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
", + "parameters": [ + { + "description": "The name of the collection.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "The type of the result. The following values are allowed:
  • id: returns an array of document ids (_id attributes)
  • key: returns an array of document keys (_key attributes)
  • path: returns an array of document URI paths. This is the default.
", + "in": "query", + "name": "type", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "All went good.
" + }, + "404": { + "description": "The collection does not exist.
" + } + }, + "summary": "Read all documents", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "post": { + "description": "free style json body\n\nCreates a new document in the collection named collection. A JSON representation of the document must be passed as the body of the POST request.
If the document was created successfully, then the \"Location\" header contains the path to the newly created document. The \"ETag\" header field contains the revision of the document.
The body of the response contains a JSON object with the following attributes:
  • _id contains the document handle of the newly created document
  • _key contains the document key
  • _rev contains the document revision
If the collection parameter waitForSync is false, then the call returns as soon as the document has been accepted. It will not wait until the document has been synced to disk.
Optionally, the URL parameter waitForSync can be used to force synchronization of the document creation operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just this specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

Example: Create a document in a collection named products. Note that the revision identifier might or might not by equal to the auto-generated key.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\netag: \"708172231\"\nlocation: /_db/_system/_api/document/products/708172231\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/708172231\", \n  \"_rev\" : \"708172231\", \n  \"_key\" : \"708172231\" \n}\n

\n
Example: Create a document in a collection named products with a collection-level waitForSync value of false.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"707647943\"\nlocation: /_db/_system/_api/document/products/707647943\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/707647943\", \n  \"_rev\" : \"707647943\", \n  \"_key\" : \"707647943\" \n}\n

\n
Example: Create a document in a collection with a collection-level waitForSync value of false, but using the waitForSync URL parameter.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products&waitForSync=true <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\netag: \"709220807\"\nlocation: /_db/_system/_api/document/products/709220807\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/709220807\", \n  \"_rev\" : \"709220807\", \n  \"_key\" : \"709220807\" \n}\n

\n
Example: Create a document in a new, named collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products&createCollection=true <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"708696519\"\nlocation: /_db/_system/_api/document/products/708696519\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/708696519\", \n  \"_rev\" : \"708696519\", \n  \"_key\" : \"708696519\" \n}\n

\n
Example: Unknown collection name

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ \"Hello\": \"World\" }\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Illegal document

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/document?collection=products <<EOF\n{ 1: \"World\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting attribute name\", \n  \"code\" : 400, \n  \"errorNum\" : 600 \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the document.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of true or yes, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
Note: this flag is not supported in a cluster. Using it will result in an error.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if the document was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": "Create document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + } + }, + "/_api/document/{document-handle}": { + "delete": { + "description": "\n\nThe body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the removed document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

Example: Using document handle:

shell> curl -X DELETE --dump - http://localhost:8529/_api/document/products/700832199\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/700832199\", \n  \"_rev\" : \"700832199\", \n  \"_key\" : \"700832199\" \n}\n

\n
Example: Unknown document handle:

shell> curl -X DELETE --dump - http://localhost:8529/_api/document/products/702994887\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n
Example: Revision conflict:

shell> curl -X DELETE --header 'If-Match: \"702339527\"' --dump - http://localhost:8529/_api/document/products/702011847\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"702011847\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/702011847\", \n  \"_rev\" : \"702011847\", \n  \"_key\" : \"702011847\" \n}\n

\n
", + "parameters": [ + { + "description": "Removes the document identified by document-handle.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally remove a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing documents (see replacing documents for more details).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "Wait until deletion operation has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally remove a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was removed successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was removed successfully and waitForSync was false.
" + }, + "404": { + "description": "is returned if the collection or the document was not found. The response body contains an error document in this case.
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Removes a document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "get": { + "description": "\n\nReturns the document identified by document-handle. The returned document contains three special attributes: _id containing the document handle, _key containing key which uniquely identifies a document in a given collection and _rev containing the revision.

Example: Use a document handle:

shell> curl --dump - http://localhost:8529/_api/document/products/709745095\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"709745095\"\n\n{ \n  \"hello\" : \"world\", \n  \"_id\" : \"products/709745095\", \n  \"_rev\" : \"709745095\", \n  \"_key\" : \"709745095\" \n}\n

\n
Example: Use a document handle and an etag:

shell> curl --header 'If-None-Match: \"713415111\"' --dump - http://localhost:8529/_api/document/products/713415111\n\n

\n
Example: Unknown document handle:

shell> curl --dump - http://localhost:8529/_api/document/products/unknownhandle\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
", + "parameters": [ + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. The document is returned, if it has a different revision than the given etag. Otherwise an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one etag. The document is returned, if it has the same revision as the given etag. Otherwise a HTTP 412 is returned. As an alternative you can supply the etag in an attribute rev in the URL.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the document has the same version
" + }, + "404": { + "description": "is returned if the document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "Read document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "head": { + "description": "\n\nLike GET, but only returns the header fields and not the body. You can use this call to get the current revision of a document or check if the document was deleted.

Example:

shell> curl -X HEAD --dump - http://localhost:8529/_api/document/products/712825287\n\n

\n

", + "parameters": [ + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally fetch a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. If the current document revision is different to the specified etag, an HTTP 200 response is returned. If the current document revision is identical to the specified etag, then an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "You can conditionally fetch a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the document has same version
" + }, + "404": { + "description": "is returned if the document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the etag header.
" + } + }, + "summary": "Read document header", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "patch": { + "description": "free style json body\n\nPartially updates the document identified by document-handle. The body of the request must contain a JSON document with the attributes to patch (the patch document). All attributes from the patch document will be added to the existing document if they do not yet exist, and overwritten in the existing document if they do exist there.
Setting an attribute value to null in the patch document will cause a value of null to be saved for the attribute by default.
Optionally, the URL parameter waitForSync can be used to force synchronization of the document update operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
You can conditionally update a document based on a target revision id by using either the rev URL parameter or the if-match HTTP header. To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing documents (see replacing documents for details).

Example: patches an existing document with new content.

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855 <<EOF\n{ \n  \"hello\" : \"world\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"704174535\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704174535\", \n  \"_key\" : \"703846855\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855 <<EOF\n{ \n  \"numbers\" : { \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"empty\" : null \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"704764359\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704764359\", \n  \"_key\" : \"703846855\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/703846855\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"704764359\"\n\n{ \n  \"one\" : \"world\", \n  \"hello\" : \"world\", \n  \"numbers\" : { \n    \"empty\" : null, \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3 \n  }, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"704764359\", \n  \"_key\" : \"703846855\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/703846855?keepNull=false <<EOF\n{ \n  \"hello\" : null, \n  \"numbers\" : { \n    \"four\" : 4 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"705223111\"\nlocation: /_db/_system/_api/document/products/703846855\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"705223111\", \n  \"_key\" : \"703846855\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/703846855\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: 
\"705223111\"\n\n{ \n  \"one\" : \"world\", \n  \"numbers\" : { \n    \"empty\" : null, \n    \"one\" : 1, \n    \"two\" : 2, \n    \"three\" : 3, \n    \"four\" : 4 \n  }, \n  \"_id\" : \"products/703846855\", \n  \"_rev\" : \"705223111\", \n  \"_key\" : \"703846855\" \n}\n

\n
Example: Merging attributes of an object using `mergeObjects`:

shell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"706075079\"\n\n{ \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"706075079\", \n  \"_key\" : \"706075079\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/706075079?mergeObjects=true <<EOF\n{ \n  \"inhabitants\" : { \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  } \n}\nEOF\n\nshell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"706599367\"\n\n{ \n  \"inhabitants\" : { \n    \"china\" : 1366980000, \n    \"india\" : 1263590000, \n    \"usa\" : 319220000, \n    \"indonesia\" : 252164800, \n    \"brazil\" : 203553000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"706599367\", \n  \"_key\" : \"706075079\" \n}\nshell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/document/products/706075079?mergeObjects=false <<EOF\n{ \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  } \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"707058119\"\nlocation: /_db/_system/_api/document/products/706075079\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"707058119\", \n  \"_key\" : \"706075079\" \n}\nshell> curl --dump - http://localhost:8529/_api/document/products/706075079\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"707058119\"\n\n{ \n  \"inhabitants\" : { \n    \"pakistan\" : 188346000 \n  }, \n  \"_id\" : \"products/706075079\", \n  \"_rev\" : \"707058119\", \n  \"_key\" : \"706075079\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the document update.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the intention is to delete existing attributes with the patch command, the URL query parameter keepNull can be used with a value of false. This will modify the behavior of the patch command to remove any attributes from the existing document that are contained in the patch document with an attribute value of null.
", + "in": "query", + "name": "keepNull", + "required": false, + "type": "boolean" + }, + { + "description": "Controls whether objects (not arrays) will be merged if present in both the existing and the patch document. If set to false, the value in the patch document will overwrite the existing document's value. If set to true, objects will be merged. The default is true.
", + "in": "query", + "name": "mergeObjects", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally patch a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter.
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally patch a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Patch document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + }, + "put": { + "description": "free style json body\n\nCompletely updates (i.e. replaces) the document identified by document-handle. If the document exists and can be updated, then a HTTP 201 is returned and the \"ETag\" header field contains the new revision of the document.
If the new document passed in the body of the request contains the document-handle in the attribute _id and the revision in _rev, these attributes will be ignored. Only the URI and the \"ETag\" header are relevant in order to avoid confusion when using proxies.

Optionally, the URL parameter waitForSync can be used to force synchronization of the document replacement operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.

The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
There are two ways for specifying the targeted document revision id for conditional replacements (i.e. replacements that will only be executed if the revision id found in the database matches the document revision id specified in the request):
  • specifying the target revision in the rev URL query parameter
  • specifying the target revision in the if-match HTTP header
    Specifying a target revision is optional, however, if done, only one of the described mechanisms must be used (either the rev URL parameter or the if-match HTTP header). Regardless which mechanism is used, the parameter needs to contain the target document revision id as returned in the _rev attribute of a document or by an HTTP etag header.
For example, to conditionally replace a document based on a specific revision id, you can use the following request:

`PUT /_api/document/document-handle?rev=etag`

If a target revision id is provided in the request (e.g. via the etag value in the rev URL query parameter above), ArangoDB will check that the revision id of the document found in the database is equal to the target revision id provided in the request. If there is a mismatch between the revision id, then by default a HTTP 412 conflict is returned and no replacement is performed.

The conditional update behavior can be overridden with the policy URL query parameter:

`PUT /_api/document/document-handle?policy=policy`

If policy is set to error, then the behavior is as before: replacements will fail if the revision id found in the database does not match the target revision id specified in the request.
If policy is set to last, then the replacement will succeed, even if the revision id found in the database does not match the target revision id specified in the request. You can use the *last* policy to force replacements.

Example: Using a document handle

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/714004935 <<EOF\n{\"Hello\": \"you\"}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"714332615\"\nlocation: /_db/_system/_api/document/products/714004935\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/714004935\", \n  \"_rev\" : \"714332615\", \n  \"_key\" : \"714004935\" \n}\n

\n
Example: Unknown document handle

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/718199239 <<EOF\n{}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"document not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1202 \n}\n

\n
Example: Produce a revision conflict

shell> curl -X PUT --header 'If-Match: \"715184583\"' --data-binary @- --dump - http://localhost:8529/_api/document/products/714856903 <<EOF\n{\"other\":\"content\"}\nEOF\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"714856903\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/714856903\", \n  \"_rev\" : \"714856903\", \n  \"_key\" : \"714856903\" \n}\n

\n
Example: Last write wins

shell> curl -X PUT --header 'If-Match: \"716298695\"' --data-binary @- --dump - http://localhost:8529/_api/document/products/715971015?policy=last <<EOF\n{}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"716560839\"\nlocation: /_db/_system/_api/document/products/715971015\n\n{ \n  \"error\" : false, \n  \"_id\" : \"products/715971015\", \n  \"_rev\" : \"716560839\", \n  \"_key\" : \"715971015\" \n}\n

\n
Example: Alternative to header fields

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/document/products/717085127?rev=717412807 <<EOF\n{\"other\":\"content\"}\nEOF\n\nHTTP/1.1 412 Precondition Failed\ncontent-type: application/json; charset=utf-8\netag: \"717085127\"\n\n{ \n  \"error\" : true, \n  \"code\" : 412, \n  \"errorNum\" : 1200, \n  \"errorMessage\" : \"precondition failed\", \n  \"_id\" : \"products/717085127\", \n  \"_rev\" : \"717085127\", \n  \"_key\" : \"717085127\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the new document.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "Wait until document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally replace a document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter (see below).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally replace a document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was replaced successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was replaced successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a document. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "Replace document", + "tags": [ + "Documents" + ], + "x-examples": [], + "x-filename": "Documents - arangod/RestHandler/RestDocumentHandler.cpp" + } + }, + "/_api/edge": { + "get": { + "description": "\n\nReturns an array of all URIs for all edges from the collection identified by collection.
", + "parameters": [ + { + "description": "The name of the collection.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "All went good.
" + }, + "404": { + "description": "The collection does not exist.
" + } + }, + "summary": " Read all edges from collection", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "post": { + "description": "free style json body\n\nCreates a new edge document in the collection named collection. A JSON representation of the document must be passed as the body of the POST request.
The from and to handles are immutable once the edge has been created.
In all other respects the method works like POST /document.

Example: Create an edge and read it back:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/edge/?collection=edges&from=vertices/1&to=vertices/2 <<EOF\n{ \n  \"name\" : \"Emil\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json; charset=utf-8\netag: \"721082823\"\nlocation: /_db/_system/_api/edge/edges/721082823\n\n{ \n  \"error\" : false, \n  \"_id\" : \"edges/721082823\", \n  \"_rev\" : \"721082823\", \n  \"_key\" : \"721082823\" \n}\nshell> curl --dump - http://localhost:8529/_api/edge/edges/721082823\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\netag: \"721082823\"\n\n{ \n  \"name\" : \"Emil\", \n  \"_id\" : \"edges/721082823\", \n  \"_rev\" : \"721082823\", \n  \"_key\" : \"721082823\", \n  \"_from\" : \"vertices/1\", \n  \"_to\" : \"vertices/2\" \n}\n

\n
", + "parameters": [ + { + "description": "A JSON representation of the edge document must be passed as the body of the POST request. This JSON object may contain the edge's document key in the _key attribute if needed.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "Creates a new edge in the collection identified by collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of true or yes, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
Note: This flag is not supported in a cluster. Using it will result in an error.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until the edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "The document handle of the start point must be passed in from handle.
", + "in": "query", + "name": "from", + "required": true, + "type": "string" + }, + { + "description": "The document handle of the end point must be passed in to handle.
", + "in": "query", + "name": "to", + "required": true, + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the edge was created successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge was created successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of an edge, or if the collection specified is not an edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": "Create edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/edge/{document-handle}": { + "delete": { + "description": "\n\nThe body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the deleted edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
", + "parameters": [ + { + "description": "Deletes the edge document identified by document-handle.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally delete an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing edge documents (see replacing edge documents for more details).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally delete an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge document was deleted successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge document was deleted successfully and waitForSync was false.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found. The response body contains an error document in this case.
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Deletes edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "get": { + "description": "\n\nReturns the edge identified by document-handle. The returned edge contains a few special attributes:
  • _id contains the document handle
  • _rev contains the revision
  • _from and _to contain the document handles of the connected vertex documents
", + "parameters": [ + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. The edge is returned if it has a different revision than the given etag. Otherwise an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one etag. The edge is returned if it has the same revision ad the given etag. Otherwise a HTTP 412 is returned. As an alternative you can supply the etag in an attribute rev in the URL.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the edge has the same version
" + }, + "404": { + "description": "is returned if the edge or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Read edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "head": { + "description": "\n\nLike GET, but only returns the header fields and not the body. You can use this call to get the current revision of an edge document or check if it was deleted.
", + "parameters": [ + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "You can conditionally fetch an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one etag. If the current document revision is different to the specified etag, an HTTP 200 response is returned. If the current document revision is identical to the specified etag, then an HTTP 304 is returned.
", + "in": "header", + "name": "If-None-Match", + "type": "string" + }, + { + "description": "You can conditionally fetch an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge document was found
" + }, + "304": { + "description": "is returned if the \"If-None-Match\" header is given and the edge document has same version
" + }, + "404": { + "description": "is returned if the edge document or collection was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the etag header.
" + } + }, + "summary": " Read edge header", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "patch": { + "description": "free style json body\n\nPartially updates the edge document identified by document-handle. The body of the request must contain a JSON document with the attributes to patch (the patch document). All attributes from the patch document will be added to the existing edge document if they do not yet exist, and overwritten in the existing edge document if they do exist there.
Setting an attribute value to null in the patch document will cause a value of null to be saved for the attribute by default.
Note: Internal attributes such as _key, _from and _to are immutable once set and cannot be updated.
Optionally, the URL parameter waitForSync can be used to force synchronization of the edge document update operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the edge document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
You can conditionally update an edge document based on a target revision id by using either the rev URL parameter or the if-match HTTP header. To control the update behavior in case there is a revision mismatch, you can use the policy parameter. This is the same as when replacing edge documents (see replacing documents for details).
", + "parameters": [ + { + "description": "A JSON representation of the edge update.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "If the intention is to delete existing attributes with the patch command, the URL query parameter keepNull can be used with a value of false. This will modify the behavior of the patch command to remove any attributes from the existing edge document that are contained in the patch document with an attribute value of null.
", + "in": "query", + "name": "keepNull", + "required": false, + "type": "boolean" + }, + { + "description": "Controls whether objects (not arrays) will be merged if present in both the existing and the patch edge. If set to false, the value in the patch edge will overwrite the existing edge's value. If set to true, objects will be merged. The default is true.
", + "in": "query", + "name": "mergeObjects", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally patch an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter.
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally patch an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the document was patched successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the document was patched successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation or when applied on an non-edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": " Patches edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + }, + "put": { + "description": "free style json body\n\nCompletely updates (i.e. replaces) the edge document identified by document-handle. If the edge document exists and can be updated, then a HTTP 201 is returned and the \"ETag\" header field contains the new revision of the edge document.
If the new edge document passed in the body of the request contains the document-handle in the attribute _id and the revision in _rev, these attributes will be ignored. Only the URI and the \"ETag\" header are relevant in order to avoid confusion when using proxies. Note: The attributes _from and _to of an edge are immutable and cannot be updated either.
Optionally, the URL parameter waitForSync can be used to force synchronization of the edge document replacement operation to disk even in case that the waitForSync flag had been disabled for the entire collection. Thus, the waitForSync URL parameter can be used to force synchronization of just specific operations. To use this, set the waitForSync parameter to true. If the waitForSync parameter is not specified or set to false, then the collection's default waitForSync behavior is applied. The waitForSync URL parameter cannot be used to disable synchronization for collections that have a default waitForSync value of true.
The body of the response contains a JSON object with the information about the handle and the revision. The attribute _id contains the known document-handle of the updated edge document, _key contains the key which uniquely identifies a document in a given collection, and the attribute _rev contains the new document revision.
If the edge document does not exist, then a HTTP 404 is returned and the body of the response contains an error document.
There are two ways for specifying the targeted revision id for conditional replacements (i.e. replacements that will only be executed if the revision id found in the database matches the revision id specified in the request):
  • specifying the target revision in the rev URL query parameter
  • specifying the target revision in the if-match HTTP header
Specifying a target revision is optional, however, if done, only one of the described mechanisms must be used (either the rev URL parameter or the if-match HTTP header). Regardless which mechanism is used, the parameter needs to contain the target revision id as returned in the _rev attribute of an edge document or by an HTTP etag header.
For example, to conditionally replace an edge document based on a specific revision id, you can use the following request:
  • PUT /_api/document/document-handle?rev=etag
If a target revision id is provided in the request (e.g. via the etag value in the rev URL query parameter above), ArangoDB will check that the revision id of the edge document found in the database is equal to the target revision id provided in the request. If there is a mismatch between the revision id, then by default a HTTP 412 conflict is returned and no replacement is performed.
The conditional update behavior can be overridden with the policy URL query parameter:
  • PUT /_api/document/document-handle?policy=policy
If policy is set to error, then the behavior is as before: replacements will fail if the revision id found in the database does not match the target revision id specified in the request.
If policy is set to last, then the replacement will succeed, even if the revision id found in the database does not match the target revision id specified in the request. You can use the *last* policy to force replacements.
", + "parameters": [ + { + "description": "A JSON representation of the new edge data.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The handle of the edge document.
", + "format": "string", + "in": "path", + "name": "document-handle", + "required": true, + "type": "string" + }, + { + "description": "Wait until edge document has been synced to disk.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "You can conditionally replace an edge document based on a target revision id by using the rev URL parameter.
", + "in": "query", + "name": "rev", + "required": false, + "type": "string" + }, + { + "description": "To control the update behavior in case there is a revision mismatch, you can use the policy parameter (see below).
", + "in": "query", + "name": "policy", + "required": false, + "type": "string" + }, + { + "description": "You can conditionally replace an edge document based on a target revision id by using the if-match HTTP header.
", + "in": "header", + "name": "If-Match", + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the edge document was replaced successfully and waitForSync was true.
" + }, + "202": { + "description": "is returned if the edge document was replaced successfully and waitForSync was false.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of an edge document or if applied to a non-edge collection. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection or the edge document was not found
" + }, + "412": { + "description": "is returned if a \"If-Match\" header or rev is given and the found document has a different version. The response will also contain the found document's current revision in the _rev attribute. Additionally, the attributes _id and _key will be returned.
" + } + }, + "summary": "replaces an edge", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/edges/{collection-id}": { + "get": { + "description": "\n\nReturns an array of edges starting or ending in the vertex identified by vertex-handle.

Example: Any direction

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/6\", \n      \"_key\" : \"6\", \n      \"_rev\" : \"725211591\", \n      \"_from\" : \"vertices/2\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v2 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/7\", \n      \"_key\" : \"7\", \n      \"_rev\" : \"725735879\", \n      \"_from\" : \"vertices/4\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v4 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/5\", \n      \"_key\" : \"5\", \n      \"_rev\" : \"724687303\", \n      \"_from\" : \"vertices/1\", \n      \"_to\" : \"vertices/3\", \n      \"$label\" : \"v1 -> v3\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: In edges

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=in\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/6\", \n      \"_key\" : \"6\", \n      \"_rev\" : \"729930183\", \n      \"_from\" : \"vertices/2\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v2 -> v1\" \n    }, \n    { \n      \"_id\" : \"edges/7\", \n      \"_key\" : \"7\", \n      \"_rev\" : \"730454471\", \n      \"_from\" : \"vertices/4\", \n      \"_to\" : \"vertices/1\", \n      \"$label\" : \"v4 -> v1\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Out edges

shell> curl --dump - http://localhost:8529/_api/edges/edges?vertex=vertices/1&direction=out\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"edges\" : [ \n    { \n      \"_id\" : \"edges/5\", \n      \"_key\" : \"5\", \n      \"_rev\" : \"734124487\", \n      \"_from\" : \"vertices/1\", \n      \"_to\" : \"vertices/3\", \n      \"$label\" : \"v1 -> v3\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the collection.
", + "format": "string", + "in": "path", + "name": "collection-id", + "required": true, + "type": "string" + }, + { + "description": "The id of the start vertex.
", + "in": "query", + "name": "vertex", + "required": true, + "type": "string" + }, + { + "description": "Selects in or out direction for edges. If not set, any edges are returned.
", + "in": "query", + "name": "direction", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the edge collection was found and edges were retrieved.
" + }, + "400": { + "description": "is returned if the request contains invalid parameters.
" + }, + "404": { + "description": "is returned if the edge collection was not found.
" + } + }, + "summary": " Read in- or outbound edges", + "tags": [ + "Graph edges" + ], + "x-examples": [], + "x-filename": "Graph edges - arangod/RestHandler/RestEdgeHandler.cpp, js/actions/api-edges.js" + } + }, + "/_api/endpoint": { + "get": { + "description": "\n\nReturns an array of all configured endpoints the server is listening on. For each endpoint, the array of allowed databases is returned too if set.
The result is a JSON object which has the endpoints as keys, and an array of mapped database names as values for each endpoint.
If an array of mapped databases is empty, it means that all databases can be accessed via the endpoint. If an array of mapped databases contains more than one database name, this means that any of the databases might be accessed via the endpoint, and the first database in the array will be treated as the default database for the endpoint. The default database will be used when an incoming request does not specify a database name in the request explicitly.
Note: retrieving the array of all endpoints is allowed in the system database only. Calling this action in any other database will make the server return an error.

Example:

shell> curl --dump - http://localhost:8529/_api/endpoint\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"endpoint\" : \"tcp://127.0.0.1:32239\", \n    \"databases\" : [ ] \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned when the array of endpoints can be determined successfully.
" + }, + "400": { + "description": "is returned if the action is not carried out in the system database.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + } + }, + "summary": " Return list of all endpoints", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/explain": { + "post": { + "description": "**A json post document with these Properties is required:**
  • query: the query which you want explained; If the query references any bind variables, these must also be passed in the attribute bindVars. Additional options for the query can be passed in the options attribute.
  • options: Options for the query
    • optimizer.rules: an array of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. To disable a rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is also a pseudo-rule `all`, which will match all optimizer rules. of type string
    • maxNumberOfPlans: an optional maximum number of plans that the optimizer is allowed to generate. Setting this attribute to a low value allows you to put a cap on the amount of work the optimizer does.
    • allPlans: if set to true, all possible execution plans will be returned. The default is false, meaning only the optimal plan will be returned.
  • bindVars: key/value pairs representing the bind values of type object
\n\n
To explain how an AQL query would be executed on the server, the query string can be sent to the server via an HTTP POST request. The server will then validate the query and create an execution plan for it. The execution plan will be returned, but the query will not be executed.
The execution plan that is returned by the server can be used to estimate the probable performance of the query. Though the actual performance will depend on many different factors, the execution plan normally can provide some rough estimates on the amount of work the server needs to do in order to actually run the query.
By default, the explain operation will return the optimal plan as chosen by the query optimizer. The optimal plan is the plan with the lowest total estimated cost. The plan will be returned in the attribute plan of the response object. If the option allPlans is specified in the request, the result will contain all plans created by the optimizer. The plans will then be returned in the attribute plans.
The result will also contain an attribute warnings, which is an array of warnings that occurred during optimization or execution plan creation. Additionally, a stats attribute is contained in the result with some optimizer statistics.
Each plan in the result is a JSON object with the following attributes:
  • nodes: the array of execution nodes of the plan. The array of available node types can be found [here](../Aql/Optimizer.html)
  • estimatedCost: the total estimated cost for the plan. If there are multiple plans, the optimizer will choose the plan with the lowest total cost.
  • collections: an array of collections used in the query
  • rules: an array of rules the optimizer applied. An overview of the available rules can be found [here](../Aql/Optimizer.html)
  • variables: array of variables used in the query (note: this may contain internal variables created by the optimizer)

Example: Valid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products RETURN p\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"EnumerateCollectionNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 11, \n        \"estimatedNrItems\" : 10, \n        \"database\" : \"_system\", \n        \"collection\" : \"products\", \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        }, \n        \"random\" : false \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 21, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      } \n    ], \n    \"rules\" : [ ], \n    \"collections\" : [ \n      { \n        \"name\" : \"products\", \n        \"type\" : \"read\" \n      } \n    ], \n    \"variables\" : [ \n      { \n        \"id\" : 0, \n        \"name\" : \"p\" \n      } \n    ], \n    \"estimatedCost\" : 21, \n    \"estimatedNrItems\" : 10 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: A plan with some optimizer rules applied

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"IndexRangeNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 11, \n        \"estimatedCost\" : 11, \n        \"estimatedNrItems\" : 10, \n        \"database\" : \"_system\", \n        \"collection\" : \"products\", \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        }, \n        \"ranges\" : [ \n          [ ] \n        ], \n        \"index\" : { \n          \"type\" : \"skiplist\", \n          \"id\" : \"737008071\", \n          \"unique\" : false, \n          \"sparse\" : false, \n          \"fields\" : [ \n            \"id\" \n          ] \n        }, \n        \"reverse\" : false \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          11 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 21, \n        \"estimatedNrItems\" : 10, \n        \"expression\" : { \n          \"type\" : \"attribute access\", \n          \"name\" : \"id\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 1, \n          \"name\" : \"a\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"attribute\" \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          3 \n   
     ], \n        \"id\" : 4, \n        \"estimatedCost\" : 31, \n        \"estimatedNrItems\" : 10, \n        \"expression\" : { \n          \"type\" : \"compare ==\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"a\", \n              \"id\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 4 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"simple\" \n      }, \n      { \n        \"type\" : \"FilterNode\", \n        \"dependencies\" : [ \n          4 \n        ], \n        \"id\" : 5, \n        \"estimatedCost\" : 41, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        } \n      }, \n      { \n        \"type\" : \"LimitNode\", \n        \"dependencies\" : [ \n          5 \n        ], \n        \"id\" : 9, \n        \"estimatedCost\" : 42, \n        \"estimatedNrItems\" : 1, \n        \"offset\" : 0, \n        \"limit\" : 1, \n        \"fullCount\" : false \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          9 \n        ], \n        \"id\" : 6, \n        \"estimatedCost\" : 43, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"attribute access\", \n          \"name\" : \"name\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"reference\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"attribute\" \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ 
\n          6 \n        ], \n        \"id\" : 10, \n        \"estimatedCost\" : 44, \n        \"estimatedNrItems\" : 1, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"remove-redundant-calculations\", \n      \"move-calculations-up-2\", \n      \"use-index-for-sort\", \n      \"remove-unnecessary-calculations-2\", \n      \"move-calculations-down\" \n    ], \n    \"collections\" : [ \n      { \n        \"name\" : \"products\", \n        \"type\" : \"read\" \n      } \n    ], \n    \"variables\" : [ \n      { \n        \"id\" : 6, \n        \"name\" : \"5\" \n      }, \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"name\" \n      }, \n      { \n        \"id\" : 1, \n        \"name\" : \"a\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"p\" \n      } \n    ], \n    \"estimatedCost\" : 44, \n    \"estimatedNrItems\" : 1 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 35, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using some options

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products LET a = p.id FILTER a == 4 LET name = p.name SORT p.id LIMIT 1 RETURN name\", \n  \"options\" : { \n    \"maxNumberOfPlans\" : 2, \n    \"allPlans\" : true, \n    \"optimizer\" : { \n      \"rules\" : [ \n        \"-all\", \n        \"+use-index-for-sort\", \n        \"+use-index-range\" \n      ] \n    } \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plans\" : [ \n    { \n      \"nodes\" : [ \n        { \n          \"type\" : \"SingletonNode\", \n          \"dependencies\" : [ ], \n          \"id\" : 1, \n          \"estimatedCost\" : 1, \n          \"estimatedNrItems\" : 1 \n        }, \n        { \n          \"type\" : \"IndexRangeNode\", \n          \"dependencies\" : [ \n            1 \n          ], \n          \"id\" : 11, \n          \"estimatedCost\" : 11, \n          \"estimatedNrItems\" : 10, \n          \"database\" : \"_system\", \n          \"collection\" : \"products\", \n          \"outVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          }, \n          \"ranges\" : [ \n            [ ] \n          ], \n          \"index\" : { \n            \"type\" : \"skiplist\", \n            \"id\" : \"739563975\", \n            \"unique\" : false, \n            \"sparse\" : false, \n            \"fields\" : [ \n              \"id\" \n            ] \n          }, \n          \"reverse\" : false \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            11 \n          ], \n          \"id\" : 3, \n          \"estimatedCost\" : 21, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"id\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n       
         \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 1, \n            \"name\" : \"a\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            3 \n          ], \n          \"id\" : 4, \n          \"estimatedCost\" : 31, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"compare ==\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"a\", \n                \"id\" : 1 \n              }, \n              { \n                \"type\" : \"value\", \n                \"value\" : 4 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 4, \n            \"name\" : \"3\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"simple\" \n        }, \n        { \n          \"type\" : \"FilterNode\", \n          \"dependencies\" : [ \n            4 \n          ], \n          \"id\" : 5, \n          \"estimatedCost\" : 41, \n          \"estimatedNrItems\" : 10, \n          \"inVariable\" : { \n            \"id\" : 4, \n            \"name\" : \"3\" \n          } \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            5 \n          ], \n          \"id\" : 6, \n          \"estimatedCost\" : 51, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"name\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n                \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 2, \n   
         \"name\" : \"name\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"CalculationNode\", \n          \"dependencies\" : [ \n            6 \n          ], \n          \"id\" : 7, \n          \"estimatedCost\" : 61, \n          \"estimatedNrItems\" : 10, \n          \"expression\" : { \n            \"type\" : \"attribute access\", \n            \"name\" : \"id\", \n            \"subNodes\" : [ \n              { \n                \"type\" : \"reference\", \n                \"name\" : \"p\", \n                \"id\" : 0 \n              } \n            ] \n          }, \n          \"outVariable\" : { \n            \"id\" : 6, \n            \"name\" : \"5\" \n          }, \n          \"canThrow\" : false, \n          \"expressionType\" : \"attribute\" \n        }, \n        { \n          \"type\" : \"LimitNode\", \n          \"dependencies\" : [ \n            7 \n          ], \n          \"id\" : 9, \n          \"estimatedCost\" : 62, \n          \"estimatedNrItems\" : 1, \n          \"offset\" : 0, \n          \"limit\" : 1, \n          \"fullCount\" : false \n        }, \n        { \n          \"type\" : \"ReturnNode\", \n          \"dependencies\" : [ \n            9 \n          ], \n          \"id\" : 10, \n          \"estimatedCost\" : 63, \n          \"estimatedNrItems\" : 1, \n          \"inVariable\" : { \n            \"id\" : 2, \n            \"name\" : \"name\" \n          } \n        } \n      ], \n      \"rules\" : [ \n        \"use-index-for-sort\" \n      ], \n      \"collections\" : [ \n        { \n          \"name\" : \"products\", \n          \"type\" : \"read\" \n        } \n      ], \n      \"variables\" : [ \n        { \n          \"id\" : 6, \n          \"name\" : \"5\" \n        }, \n        { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        { \n          \"id\" : 2, \n          \"name\" : \"name\" \n        }, \n        
{ \n          \"id\" : 1, \n          \"name\" : \"a\" \n        }, \n        { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      ], \n      \"estimatedCost\" : 63, \n      \"estimatedNrItems\" : 1 \n    } \n  ], \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 4, \n    \"rulesSkipped\" : 31, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Returning all plans

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products FILTER p.id == 25 RETURN p\", \n  \"options\" : { \n    \"allPlans\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plans\" : [ \n    { \n      \"nodes\" : [ \n        { \n          \"type\" : \"SingletonNode\", \n          \"dependencies\" : [ ], \n          \"id\" : 1, \n          \"estimatedCost\" : 1, \n          \"estimatedNrItems\" : 1 \n        }, \n        { \n          \"type\" : \"IndexRangeNode\", \n          \"dependencies\" : [ \n            1 \n          ], \n          \"id\" : 6, \n          \"estimatedCost\" : 1.9899995050000001, \n          \"estimatedNrItems\" : 1, \n          \"database\" : \"_system\", \n          \"collection\" : \"products\", \n          \"outVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          }, \n          \"ranges\" : [ \n            [ \n              { \n                \"variable\" : \"p\", \n                \"attr\" : \"id\", \n                \"lowConst\" : { \n                  \"bound\" : 25, \n                  \"include\" : true, \n                  \"isConstant\" : true \n                }, \n                \"highConst\" : { \n                  \"bound\" : 25, \n                  \"include\" : true, \n                  \"isConstant\" : true \n                }, \n                \"lows\" : [ ], \n                \"highs\" : [ ], \n                \"valid\" : true, \n                \"equality\" : true \n              } \n            ] \n          ], \n          \"index\" : { \n            \"type\" : \"hash\", \n            \"id\" : \"736025031\", \n            \"unique\" : false, \n            \"sparse\" : false, \n            \"selectivityEstimate\" : 1, \n            \"fields\" : [ \n              \"id\" \n            ] \n          }, \n          \"reverse\" : false \n        }, \n        { \n          
\"type\" : \"ReturnNode\", \n          \"dependencies\" : [ \n            6 \n          ], \n          \"id\" : 5, \n          \"estimatedCost\" : 2.989999505, \n          \"estimatedNrItems\" : 1, \n          \"inVariable\" : { \n            \"id\" : 0, \n            \"name\" : \"p\" \n          } \n        } \n      ], \n      \"rules\" : [ \n        \"use-index-range\", \n        \"remove-filter-covered-by-index\" \n      ], \n      \"collections\" : [ \n        { \n          \"name\" : \"products\", \n          \"type\" : \"read\" \n        } \n      ], \n      \"variables\" : [ \n        { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        { \n          \"id\" : 0, \n          \"name\" : \"p\" \n        } \n      ], \n      \"estimatedCost\" : 2.989999505, \n      \"estimatedNrItems\" : 1 \n    } \n  ], \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: A query that produces a warning

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 RETURN 1 / 0\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 2, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"range\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 10 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"simple\" \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 4, \n        \"estimatedCost\" : 3, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"value\", \n          \"value\" : null \n        }, \n        \"outVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"json\" \n      }, \n      { \n        \"type\" : \"EnumerateListNode\", \n        \"dependencies\" : [ \n          4 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 13, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"outVariable\" : { \n          \"id\" : 0, \n          
\"name\" : \"i\" \n        } \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          3 \n        ], \n        \"id\" : 5, \n        \"estimatedCost\" : 23, \n        \"estimatedNrItems\" : 10, \n        \"inVariable\" : { \n          \"id\" : 4, \n          \"name\" : \"3\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"move-calculations-up-2\" \n    ], \n    \"collections\" : [ ], \n    \"variables\" : [ \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"1\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"i\" \n      } \n    ], \n    \"estimatedCost\" : 23, \n    \"estimatedNrItems\" : 10 \n  }, \n  \"warnings\" : [ \n    { \n      \"code\" : 1562, \n      \"message\" : \"division by zero\" \n    } \n  ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Invalid query (missing bind parameter)

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \n  \"query\" : \"FOR p IN products FILTER p.id == @id LIMIT 2 RETURN p.n\" \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1551, \n  \"errorMessage\" : \"no value specified for declared bind parameter 'id' (while parsing)\" \n}\n

\n
Example: The data returned in the plan attribute of the result contains one element per AQL top-level statement (i.e. FOR, RETURN, FILTER etc.). If the query optimizer removed some unnecessary statements, the result might also contain fewer elements than there were top-level statements in the AQL query. The following example shows a query with a non-sensible filter condition that the optimizer has removed so that there are fewer top-level statements.

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/explain <<EOF\n{ \"query\" : \"FOR i IN [ 1, 2, 3 ] FILTER 1 == 2 RETURN i\" }\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"plan\" : { \n    \"nodes\" : [ \n      { \n        \"type\" : \"SingletonNode\", \n        \"dependencies\" : [ ], \n        \"id\" : 1, \n        \"estimatedCost\" : 1, \n        \"estimatedNrItems\" : 1 \n      }, \n      { \n        \"type\" : \"CalculationNode\", \n        \"dependencies\" : [ \n          1 \n        ], \n        \"id\" : 2, \n        \"estimatedCost\" : 2, \n        \"estimatedNrItems\" : 1, \n        \"expression\" : { \n          \"type\" : \"array\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 1 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 2 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 3 \n            } \n          ] \n        }, \n        \"outVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"canThrow\" : false, \n        \"expressionType\" : \"json\" \n      }, \n      { \n        \"type\" : \"NoResultsNode\", \n        \"dependencies\" : [ \n          2 \n        ], \n        \"id\" : 7, \n        \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0 \n      }, \n      { \n        \"type\" : \"EnumerateListNode\", \n        \"dependencies\" : [ \n          7 \n        ], \n        \"id\" : 3, \n        \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0, \n        \"inVariable\" : { \n          \"id\" : 2, \n          \"name\" : \"1\" \n        }, \n        \"outVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"i\" \n        } \n      }, \n      { \n        \"type\" : \"ReturnNode\", \n        \"dependencies\" : [ \n          3 \n        ], \n        \"id\" : 6, \n   
     \"estimatedCost\" : 0.5, \n        \"estimatedNrItems\" : 0, \n        \"inVariable\" : { \n          \"id\" : 0, \n          \"name\" : \"i\" \n        } \n      } \n    ], \n    \"rules\" : [ \n      \"move-calculations-up\", \n      \"move-filters-up\", \n      \"remove-unnecessary-filters\", \n      \"remove-unnecessary-calculations\" \n    ], \n    \"collections\" : [ ], \n    \"variables\" : [ \n      { \n        \"id\" : 4, \n        \"name\" : \"3\" \n      }, \n      { \n        \"id\" : 2, \n        \"name\" : \"1\" \n      }, \n      { \n        \"id\" : 0, \n        \"name\" : \"i\" \n      } \n    ], \n    \"estimatedCost\" : 0.5, \n    \"estimatedNrItems\" : 0 \n  }, \n  \"warnings\" : [ ], \n  \"stats\" : { \n    \"rulesExecuted\" : 23, \n    \"rulesSkipped\" : 0, \n    \"plansCreated\" : 1 \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_explain" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the query is valid, the server will respond with HTTP 200 and return the optimal execution plan in the plan attribute of the response. If option allPlans was set in the request, an array of plans will be returned in the allPlans attribute instead.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request, or if the query contains a parse error. The body of the response will contain the error details embedded in a JSON object. Omitting bind variables if the query references any will also result in an HTTP 400 error.
" + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + } + }, + "summary": " Explain an AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/export": { + "post": { + "description": "**A json post document with these Properties is required:**
  • count: boolean flag that indicates whether the number of documents in the result set should be returned in the \"count\" attribute of the result (optional). Calculating the \"count\" attribute might in the future have a performance impact so this option is turned off by default, and \"count\" is only returned when requested.
  • restrict: an object containing an array of attribute names that will be included or excluded when returning result documents.
    Not specifying restrict will by default return all attributes of each document.
    • fields: Contains an array of attribute names to include or exclude. Matching of attribute names for inclusion or exclusion will be done on the top level only. Specifying names of nested attributes is not supported at the moment.
      of type string
    • type: has to be set to either include or exclude depending on which you want to use
  • batchSize: maximum number of result documents to be transferred from the server to the client in one roundtrip (optional). If this attribute is not set, a server-controlled default value will be used.
  • flush: if set to true, a WAL flush operation will be executed prior to the export. The flush operation will start copying documents from the WAL to the collection's datafiles. There will be an additional wait time of up to flushWait seconds after the flush to allow the WAL collector to change the adjusted document meta-data to point into the datafiles, too. The default value is false (i.e. no flush) so most recently inserted or updated documents from the collection might be missing in the export.
  • flushWait: maximum wait time in seconds after a flush operation. The default value is 10. This option only has an effect when flush is set to true.
  • limit: an optional limit value, determining the maximum number of documents to be included in the cursor. Omitting the limit attribute or setting it to 0 will lead to no limit being used. If a limit is used, it is undefined which documents from the collection will be included in the export and which will be excluded. This is because there is no natural order of documents in a collection.
  • ttl: an optional time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. If not set, a server-defined value will be used.
\n\nA call to this method creates a cursor containing all documents in the specified collection. In contrast to other data-producing APIs, the internal data structures produced by the export API are more lightweight, so it is the preferred way to retrieve all documents from a collection.
Documents are returned in a similar manner as in the `/_api/cursor` REST API. If all documents of the collection fit into the first batch, then no cursor will be created, and the result object's hasMore attribute will be set to false. If not all documents fit into the first batch, then the result object's hasMore attribute will be set to true, and the id attribute of the result will contain a cursor id.
The order in which the documents are returned is not specified.
By default, only those documents from the collection will be returned that are stored in the collection's datafiles. Documents that are present in the write-ahead log (WAL) at the time the export is run will not be exported.
To export these documents as well, the caller can issue a WAL flush request before calling the export API or set the flush attribute. Setting the flush option will trigger a WAL flush before the export so documents get copied from the WAL to the collection datafiles.
If the result set can be created by the server, the server will respond with HTTP 201. The body of the response will contain a JSON object with the result set.
The returned JSON object has the following properties:
  • error: boolean flag to indicate that an error occurred (false in this case)
  • code: the HTTP status code
  • result: an array of result documents (might be empty if the collection was empty)
  • hasMore: a boolean indicator whether there are more results available for the cursor on the server
  • count: the total number of result documents available (only available if the query was executed with the count attribute set)
  • id: id of temporary cursor created on the server (optional, see above)
If the JSON representation is malformed or the query specification is missing from the request, the server will respond with HTTP 400.
The body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message
Clients should always delete an export cursor result as early as possible because a lingering export cursor will prevent the underlying collection from being compacted or unloaded. By default, unused cursors will be deleted automatically after a server-defined idle time, and clients can adjust this idle time by setting the ttl value.
Note: this API is currently not supported on cluster coordinators.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_export" + }, + "x-description-offset": 59 + }, + { + "description": "The name of the collection to export.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "201": { + "description": "is returned if the result set can be created by the server.
" + }, + "400": { + "description": "is returned if the JSON representation is malformed or the query specification is missing from the request.
" + }, + "404": { + "description": "The server will respond with HTTP 404 in case a non-existing collection is accessed in the query.
" + }, + "405": { + "description": "The server will respond with HTTP 405 if an unsupported HTTP method is used.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.

" + } + }, + "summary": " Create export cursor", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/gharial": { + "get": { + "description": "\n\nLists all graph names stored in this database.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"graphs\" : [ \n    { \n      \"_id\" : \"_graphs/social\", \n      \"_key\" : \"social\", \n      \"_rev\" : \"557308359\", \n      \"edgeDefinitions\" : [ \n        { \n          \"collection\" : \"relation\", \n          \"from\" : [ \n            \"female\", \n            \"male\" \n          ], \n          \"to\" : [ \n            \"female\", \n            \"male\" \n          ] \n        } \n      ], \n      \"orphanCollections\" : [ ] \n    }, \n    { \n      \"_id\" : \"_graphs/routeplanner\", \n      \"_key\" : \"routeplanner\", \n      \"_rev\" : \"560650695\", \n      \"orphanCollections\" : [ ], \n      \"edgeDefinitions\" : [ \n        { \n          \"collection\" : \"germanHighway\", \n          \"from\" : [ \n            \"germanCity\" \n          ], \n          \"to\" : [ \n            \"germanCity\" \n          ] \n        }, \n        { \n          \"collection\" : \"frenchHighway\", \n          \"from\" : [ \n            \"frenchCity\" \n          ], \n          \"to\" : [ \n            \"frenchCity\" \n          ] \n        }, \n        { \n          \"collection\" : \"internationalHighway\", \n          \"from\" : [ \n            \"frenchCity\", \n            \"germanCity\" \n          ], \n          \"to\" : [ \n            \"frenchCity\", \n            \"germanCity\" \n          ] \n        } \n      ] \n    } \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the module is available and the graphs could be listed.
" + } + }, + "summary": " List all graphs", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nThe creation of a graph requires the name of the graph and a definition of its edges. [See also edge definitions](../GeneralGraphs/Management.md#edge-definitions).
**A json post document with these Properties is required:**
  • orphanCollections: An array of additional vertex collections.
  • edgeDefinitions: An array of definitions for the edge
  • name: Name of the graph.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial <<EOF\n{ \n  \"name\" : \"myGraph\", \n  \"edgeDefinitions\" : [ \n    { \n      \"collection\" : \"edges\", \n      \"from\" : [ \n        \"startVertices\" \n      ], \n      \"to\" : [ \n        \"endVertices\" \n      ] \n    } \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 527817159\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"527817159\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_create_http_examples" + }, + "x-description-offset": 229 + } + ], + "responses": { + "201": { + "description": "Is returned if the graph could be created. The body contains the graph configuration that has been stored.
" + }, + "409": { + "description": "Returned if there is a conflict storing the graph. This can occur either if a graph with this name is already stored, or if there is one edge definition with a the same [edge collection](../Glossary/index.html#edge_collection) but a different signature used in any other graph.
" + } + }, + "summary": " Create a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}": { + "delete": { + "description": "\n\nRemoves a graph from the collection \\_graphs.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the graph could be dropped.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Drop a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets a graph from the collection \\_graphs. Returns the definition content of this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/myGraph\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 552131015\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"myGraph\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"edges\", \n        \"from\" : [ \n          \"startVertices\" \n        ], \n        \"to\" : [ \n          \"endVertices\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/myGraph\", \n    \"_rev\" : \"552131015\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the graph could be found.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Get a graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge": { + "get": { + "description": "\n\nLists all edge collections within this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/edge\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"collections\" : [ \n    \"relation\" \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the edge definitions could be listed.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " List edge definitions", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds an additional edge definition to the graph. This edge definition has to contain a collection and an array of each from and to vertex collections. An edge definition can only be added if this definition is either not used in any other graph, or it is used with exactly the same definition. It is not possible to store a definition \"e\" from \"v1\" to \"v2\" in the one graph, and \"e\" from \"v2\" to \"v1\" in the other graph.
**A json post document with these Properties is required:**
  • to: One or many edge collections that can contain target vertices. of type string
  • from: One or many vertex collections that can contain source vertices. of type string
  • collection: The name of the edge collection to be used.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge <<EOF\n{ \n  \"collection\" : \"lives_in\", \n  \"from\" : [ \n    \"female\", \n    \"male\" \n  ], \n  \"to\" : [ \n    \"city\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 514972103\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      }, \n      { \n        \"collection\" : \"lives_in\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"city\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"514972103\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_edge_definition_add_http_examples" + }, + "x-description-offset": 537 + } + ], + "responses": { + "200": { + "description": "Returned if the definition could be added successfully.
" + }, + "400": { + "description": "Returned if the defininition could not be added, the edge collection is used in an other graph with a different signature.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Add edge definition", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{collection-name}": { + "post": { + "description": "\n\nCreates a new edge in the collection. Within the body the has to contain a \\_from and \\_to value referencing to valid vertices in the graph. Furthermore the edge has to be valid in the definition of this [edge collection](../Glossary/index.html#edge_collection).
free style json body
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF\n{ \n  \"type\" : \"friend\", \n  \"_from\" : \"female/alice\", \n  \"_to\" : \"female/diana\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 513464775\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/513464775\", \n    \"_rev\" : \"513464775\", \n    \"_key\" : \"513464775\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 303 + } + ], + "responses": { + "201": { + "description": "Returned if the edge could be created.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + } + }, + "summary": " Create an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{collection-name}/{edge-key}": { + "delete": { + "description": "\n\nRemoves an edge from the collection.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge could be removed.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Remove an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets an edge from the given collection.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 549837255\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_key\" : \"aliceAndBob\", \n    \"_rev\" : \"549837255\", \n    \"_from\" : \"female/alice\", \n    \"_to\" : \"male/bob\", \n    \"type\" : \"married\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge could be found.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the documents revision is different.
" + } + }, + "summary": " Get an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "patch": { + "description": "\n\nUpdates the data of the specific edge in the collection.
free style json body
Example:

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob <<EOF\n{ \n  \"since\" : \"01.01.2001\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 580639175\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_rev\" : \"580639175\", \n    \"_oldRev\" : \"579525063\", \n    \"_key\" : \"aliceAndBob\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be a JSON object containing the attributes to be updated.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 63 + } + ], + "responses": { + "200": { + "description": "Returned if the edge could be updated.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + } + }, + "summary": " Modify an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "put": { + "description": "\n\nReplaces the data of an edge in the collection.
free style json body
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation/aliceAndBob <<EOF\n{ \n  \"type\" : \"divorced\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 584505799\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"edge\" : { \n    \"_id\" : \"relation/aliceAndBob\", \n    \"_rev\" : \"584505799\", \n    \"_oldRev\" : \"583522759\", \n    \"_key\" : \"aliceAndBob\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 54 + } + ], + "responses": { + "200": { + "description": "Returned if the edge could be replaced.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no edge collection or no edge with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Replace an edge", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/edge/{definition-name}": { + "delete": { + "description": "\n\nRemove one edge definition from the graph. This will only remove the edge collection, the vertex collections remain untouched and can still be used in your queries.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/edge/relation\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 544659911\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ ], \n    \"orphanCollections\" : [ \n      \"female\", \n      \"male\" \n    ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"544659911\" \n  } \n}\n

\n

", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the edge definition could be removed from the graph.
" + }, + "400": { + "description": "Returned if no edge definition with this name is found in the graph.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Remove an edge definition from the graph", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nChange one specific edge definition. This will modify all occurrences of this definition in all graphs known to your database.
**A json post document with these Properties is required:**
  • to: One or many edge collections that can contain target vertices. of type string
  • from: One or many vertex collections that can contain source vertices. of type string
  • collection: The name of the edge collection to be used.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/edge/relation <<EOF\n{ \n  \"collection\" : \"relation\", \n  \"from\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ], \n  \"to\" : [ \n    \"female\", \n    \"male\", \n    \"animal\" \n  ] \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 593746375\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"animal\", \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"593746375\" \n  } \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_general_graph_edge_definition_modify_http_examples" + }, + "x-description-offset": 192 + } + ], + "responses": { + "200": { + "description": "Returned if the edge definition could be replaced.
" + }, + "400": { + "description": "Returned if no edge definition with this name is found in the graph.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Replace an edge definition", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex": { + "get": { + "description": "\n\nLists all vertex collections within this graph.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex\n\nHTTP/1.1 200 OK\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"collections\" : [ \n    \"female\", \n    \"male\" \n  ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the collections could be listed.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " List vertex collections", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds a vertex collection to the set of collections of the graph. If the collection does not exist, it will be created.

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex <<EOF\n{ \n  \"collection\" : \"otherVertices\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json\netag: 523426247\n\n{ \n  \"error\" : false, \n  \"code\" : 201, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ \n      \"otherVertices\" \n    ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"523426247\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "201": { + "description": "Returned if the edge collection could be added successfully.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Add vertex collection", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex/{collection-name}": { + "delete": { + "description": "\n\nRemoves a vertex collection from the graph and optionally deletes the collection, if it is not used in any other graph.

Example: You can remove vertex collections that are not used in any edge collection:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/otherVertices\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 588372423\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"graph\" : { \n    \"name\" : \"social\", \n    \"edgeDefinitions\" : [ \n      { \n        \"collection\" : \"relation\", \n        \"from\" : [ \n          \"female\", \n          \"male\" \n        ], \n        \"to\" : [ \n          \"female\", \n          \"male\" \n        ] \n      } \n    ], \n    \"orphanCollections\" : [ ], \n    \"_id\" : \"_graphs/social\", \n    \"_rev\" : \"588372423\" \n  } \n}\n

\n
Example: You cannot remove vertex collections that are used in edge collections:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/male\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1928, \n  \"errorMessage\" : \"not in orphan collection\" \n}\n

\n

", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex collection was removed from the graph successfully.
" + }, + "400": { + "description": "Returned if the vertex collection is still used in an edge definition. In this case it cannot be removed from the graph yet, it has to be removed from the edge definition first.
" + }, + "404": { + "description": "Returned if no graph with this name could be found.
" + } + }, + "summary": " Remove vertex collection", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "post": { + "description": "\n\nAdds a vertex to the given collection.
free style json body
Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/male <<EOF\n{ \n  \"name\" : \"Francis\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 521918919\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"male/521918919\", \n    \"_rev\" : \"521918919\", \n    \"_key\" : \"521918919\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 45 + } + ], + "responses": { + "201": { + "description": "Returned if the vertex could be added and waitForSync is true.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph or no vertex collection with this name could be found.
" + } + }, + "summary": " Create a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/gharial/{graph-name}/vertex/{collection-name}/{vertex-key}": { + "delete": { + "description": "\n\nRemoves a vertex from the collection.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"removed\" : true \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex could be removed.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Remove a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "get": { + "description": "\n\nGets a vertex from the given collection.

Example:

shell> curl --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice\n\nHTTP/1.1 200 OK\ncontent-type: application/json\netag: 553966023\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_key\" : \"alice\", \n    \"_rev\" : \"553966023\", \n    \"name\" : \"Alice\" \n  } \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "Returned if the vertex could be found.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Get a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "patch": { + "description": "\n\nUpdates the data of the specific vertex in the collection.
free style json body
Example:

shell> curl -X PATCH --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 576641479\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_rev\" : \"576641479\", \n    \"_oldRev\" : \"574478791\", \n    \"_key\" : \"alice\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to contain a JSON object containing exactly the attributes that should be replaced.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 65 + } + ], + "responses": { + "200": { + "description": "Returned if the vertex could be updated.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Modify a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + }, + "put": { + "description": "\n\nReplaces the data of a vertex in the collection.
free style json body
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/gharial/social/vertex/female/alice <<EOF\n{ \n  \"name\" : \"Alice Cooper\", \n  \"age\" : 26 \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: application/json\netag: 600496583\n\n{ \n  \"error\" : false, \n  \"code\" : 202, \n  \"vertex\" : { \n    \"_id\" : \"female/alice\", \n    \"_rev\" : \"600496583\", \n    \"_oldRev\" : \"598333895\", \n    \"_key\" : \"alice\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "The body has to be the JSON object to be stored.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 55 + } + ], + "responses": { + "200": { + "description": "Returned if the vertex could be replaced.
" + }, + "202": { + "description": "Returned if the request was successful but waitForSync is false.
" + }, + "404": { + "description": "Returned if no graph with this name, no vertex collection or no vertex with this id could be found.
" + }, + "412": { + "description": "Returned if if-match header is given, but the document's revision is different.
" + } + }, + "summary": " Replace a vertex", + "tags": [ + "Graph" + ], + "x-examples": [], + "x-filename": "Graph - js/apps/system/_api/gharial/APP/gharial.js" + } + }, + "/_api/import#document": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates documents in the collection identified by `collection-name`. The first line of the request body must contain a JSON-encoded array of attribute names. All following lines in the request body must contain JSON-encoded arrays of attribute values. Each line is interpreted as a separate document, and the values specified will be mapped to the array of attribute names specified in the first header line.
The response is a JSON object with the following attributes:
  • `created`: number of documents imported.
  • `errors`: number of documents that were not imported due to an error.
  • `empty`: number of empty lines found in the input (will only contain a value greater than zero for types `documents` or `auto`).
  • `updated`: number of updated/replaced documents (in case `onDuplicate` was set to either `update` or `replace`).
  • `ignored`: number of failed but ignored insert operations (in case `onDuplicate` was set to `ignore`).
  • `details`: if URL parameter `details` is set to true, the result will contain a `details` attribute which is an array with more detailed information about which documents could not be inserted.
Note: this API is currently not supported on cluster coordinators.

Example: Importing two documents, with attributes `_key`, `value1` and `value2` each. One line in the import data is empty

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n\n[ \"foo\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 1, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing two documents into a new collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&createCollection=true <<EOF\n[ \"value1\", \"value2\" ]\n[ 1234, null ]\n[ \"foo\", \"bar\" ]\n[ 534.55, true ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, with attributes `_from`, `_to` and `name`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links <<EOF\n[ \"_from\", \"_to\", \"name\" ]\n[ \"products/123\", \"products/234\", \"some name\" ]\n[ \"products/332\", \"products/abc\", \"other name\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, omitting `_from` or `_to`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&details=true <<EOF\n[ \"name\" ]\n[ \"some name\" ]\n[ \"other name\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 0, \n  \"errors\" : 2, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 1: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"some name\\\"}\", \n    \"at position 2: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"other name\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, but allow partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&details=true <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"abc\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 1, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 2: creating document failed with error 'unique constraint violated', offending document: {\\\"_key\\\":\\\"abc\\\",\\\"value1\\\":\\\"bar\\\",\\\"value2\\\":\\\"baz\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, not allowing partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&complete=true <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"abc\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 409 Conflict\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cannot create document, unique constraint violated\", \n  \"code\" : 409, \n  \"errorNum\" : 1210 \n}\n

\n
Example: Using a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n[ \"_key\", \"value1\", \"value2\" ]\n[ \"abc\", 25, \"test\" ]\n[ \"foo\", \"bar\", \"baz\" ]\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Using a malformed body

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products <<EOF\n{ \"_key\": \"foo\", \"value1\": \"bar\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"no JSON array found in second line\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
", + "parameters": [ + { + "description": "The body must consist of JSON-encoded arrays of attribute values, with one line per document. The first row of the request must be a JSON-encoded array of attribute names. These attribute names are used for the data in the subsequent lines.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then all data in the collection will be removed prior to the import. Note that any existing index definitions will be preserved.
", + "in": "query", + "name": "overwrite", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until documents have been synced to disk before returning.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Controls what action is carried out in case of a unique key constraint violation. Possible values are:
  • error: this will not import the current document because of the unique key constraint violation. This is the default setting.
  • update: this will update an existing document in the database with the data specified in the request. Attributes of the existing document that are not present in the request will be preserved.
  • replace: this will replace an existing document in the database with the data specified in the request.
  • ignore: this will not update an existing document and simply ignore the error caused by the unique key constraint violation.
Note that update, replace and ignore will only work when the import document in the request contains the _key attribute. update and replace may also fail because of secondary unique key constraint violations.
", + "in": "query", + "name": "onDuplicate", + "required": false, + "type": "string" + }, + { + "description": "If set to `true` or `yes`, it will make the whole import fail if any error occurs. Otherwise the import will continue even if some documents cannot be imported.
", + "in": "query", + "name": "complete", + "required": false, + "type": "boolean" + }, + { + "description": "If set to `true` or `yes`, the result will include an attribute `details` with details about documents that could not be imported.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if all documents could be imported successfully.
" + }, + "400": { + "description": "is returned if `type` contains an invalid value, no `collection` is specified, the documents are incorrectly encoded, or the request is malformed.
" + }, + "404": { + "description": "is returned if `collection` or the `_from` or `_to` attributes of an imported edge refer to an unknown collection.
" + }, + "409": { + "description": "is returned if the import would trigger a unique key violation and `complete` is set to `true`.
" + }, + "500": { + "description": "is returned if the server cannot auto-generate a document key (out of keys error) for a document with no user-defined key.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.
" + } + }, + "summary": "imports document values", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/import#json": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates documents in the collection identified by `collection-name`. The JSON representations of the documents must be passed as the body of the POST request. The request body can either consist of multiple lines, with each line being a single stand-alone JSON object, or a single JSON array with sub-objects.
The response is a JSON object with the following attributes:
  • `created`: number of documents imported.
  • `errors`: number of documents that were not imported due to an error.
  • `empty`: number of empty lines found in the input (will only contain a value greater than zero for types `documents` or `auto`).
  • `updated`: number of updated/replaced documents (in case `onDuplicate` was set to either `update` or `replace`).
  • `ignored`: number of failed but ignored insert operations (in case `onDuplicate` was set to `ignore`).
  • `details`: if URL parameter `details` is set to true, the result will contain a `details` attribute which is an array with more detailed information about which documents could not be inserted.
Note: this API is currently not supported on cluster coordinators.

Example: Importing documents with heterogeneous attributes from a JSON array

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF\n[ \n  { \n    \"_key\" : \"abc\", \n    \"value1\" : 25, \n    \"value2\" : \"test\", \n    \"allowed\" : true \n  }, \n  { \n    \"_key\" : \"foo\", \n    \"name\" : \"baz\" \n  }, \n  { \n    \"name\" : { \n      \"detailed\" : \"detailed name\", \n      \"short\" : \"short name\" \n    } \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing documents from individual JSON lines

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\", \"allowed\": true }\n{ \"_key\": \"foo\", \"name\": \"baz\" }\n\n{ \"name\": { \"detailed\": \"detailed name\", \"short\": \"short name\" } }\n\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 1, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Using the auto type detection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=auto <<EOF\n[ \n  { \n    \"_key\" : \"abc\", \n    \"value1\" : 25, \n    \"value2\" : \"test\", \n    \"allowed\" : true \n  }, \n  { \n    \"_key\" : \"foo\", \n    \"name\" : \"baz\" \n  }, \n  { \n    \"name\" : { \n      \"detailed\" : \"detailed name\", \n      \"short\" : \"short name\" \n    } \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing documents into a new collection from a JSON array

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&createCollection=true&type=list <<EOF\n[ \n  { \n    \"id\" : \"12553\", \n    \"active\" : true \n  }, \n  { \n    \"id\" : \"4433\", \n    \"active\" : false \n  }, \n  { \n    \"id\" : \"55932\", \n    \"count\" : 4334 \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 3, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, with attributes `_from`, `_to` and `name`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=documents <<EOF\n{ \"_from\": \"products/123\", \"_to\": \"products/234\" }\n{ \"_from\": \"products/332\", \"_to\": \"products/abc\", \"name\": \"other name\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 2, \n  \"errors\" : 0, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0 \n}\n

\n
Example: Importing into an edge collection, omitting `_from` or `_to`

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=links&type=list&details=true <<EOF\n[ \n  { \n    \"name\" : \"some name\" \n  } \n]\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 0, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 1: missing '_from' or '_to' attribute, offending document: {\\\"name\\\":\\\"some name\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, but allow partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&details=true <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\" }\n{ \"_key\": \"abc\", \"value1\": \"bar\", \"value2\": \"baz\" }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"created\" : 1, \n  \"errors\" : 1, \n  \"empty\" : 0, \n  \"updated\" : 0, \n  \"ignored\" : 0, \n  \"details\" : [ \n    \"at position 2: creating document failed with error 'unique constraint violated', offending document: {\\\"_key\\\":\\\"abc\\\",\\\"value1\\\":\\\"bar\\\",\\\"value2\\\":\\\"baz\\\"}\" \n  ] \n}\n

\n
Example: Violating a unique constraint, not allowing partial imports

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents&complete=true <<EOF\n{ \"_key\": \"abc\", \"value1\": 25, \"value2\": \"test\" }\n{ \"_key\": \"abc\", \"value1\": \"bar\", \"value2\": \"baz\" }\nEOF\n\nHTTP/1.1 409 Conflict\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"cannot create document, unique constraint violated\", \n  \"code\" : 409, \n  \"errorNum\" : 1210 \n}\n

\n
Example: Using a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=documents <<EOF\n{ \"name\": \"test\" }\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"collection 'products' not found\", \n  \"code\" : 404, \n  \"errorNum\" : 1203 \n}\n

\n
Example: Using a malformed body

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/import?collection=products&type=list <<EOF\n{ }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"expecting a JSON array in the request\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
", + "parameters": [ + { + "description": "The body must either be a JSON-encoded array of objects or a string with multiple JSON objects separated by newlines.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + }, + { + "description": "Determines how the body of the request will be interpreted. `type` can have the following values:
  • `documents`: when this type is used, each line in the request body is expected to be an individual JSON-encoded document. Multiple JSON objects in the request body need to be separated by newlines.
  • `list`: when this type is used, the request body must contain a single JSON-encoded array of individual objects to import.
  • `auto`: if set, this will automatically determine the body type (either `documents` or `list`).
", + "in": "query", + "name": "type", + "required": true, + "type": "string" + }, + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then the collection is created if it does not yet exist. Other values will be ignored so the collection must be present for the operation to succeed.
", + "in": "query", + "name": "createCollection", + "required": false, + "type": "boolean" + }, + { + "description": "If this parameter has a value of `true` or `yes`, then all data in the collection will be removed prior to the import. Note that any existing index definitions will be preseved.
", + "in": "query", + "name": "overwrite", + "required": false, + "type": "boolean" + }, + { + "description": "Wait until documents have been synced to disk before returning.
", + "in": "query", + "name": "waitForSync", + "required": false, + "type": "boolean" + }, + { + "description": "Controls what action is carried out in case of a unique key constraint violation. Possible values are:
  • error: this will not import the current document because of the unique key constraint violation. This is the default setting.
  • update: this will update an existing document in the database with the data specified in the request. Attributes of the existing document that are not present in the request will be preserved.
  • replace: this will replace an existing document in the database with the data specified in the request.
  • ignore: this will not update an existing document and simply ignore the error caused by a unique key constraint violation.
Note that update, replace and ignore will only work when the import document in the request contains the _key attribute. update and replace may also fail because of secondary unique key constraint violations.
", + "in": "query", + "name": "onDuplicate", + "required": false, + "type": "string" + }, + { + "description": "If set to `true` or `yes`, it will make the whole import fail if any error occurs. Otherwise the import will continue even if some documents cannot be imported.
", + "in": "query", + "name": "complete", + "required": false, + "type": "boolean" + }, + { + "description": "If set to `true` or `yes`, the result will include an attribute `details` with details about documents that could not be imported.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "responses": { + "201": { + "description": "is returned if all documents could be imported successfully.
" + }, + "400": { + "description": "is returned if `type` contains an invalid value, no `collection` is specified, the documents are incorrectly encoded, or the request is malformed.
" + }, + "404": { + "description": "is returned if `collection` or the `_from` or `_to` attributes of an imported edge refer to an unknown collection.
" + }, + "409": { + "description": "is returned if the import would trigger a unique key violation and `complete` is set to `true`.
" + }, + "500": { + "description": "is returned if the server cannot auto-generate a document key (out of keys error) for a document with no user-defined key.
" + }, + "501": { + "description": "The server will respond with HTTP 501 if this API is called on a cluster coordinator.
" + } + }, + "summary": "imports documents from JSON", + "tags": [ + "Bulk" + ], + "x-examples": [], + "x-filename": "Bulk - arangod/RestHandler/RestExportHandler.cpp, arangod/RestHandler/RestImportHandler.cpp, arangod/RestHandler/RestBatchHandler.cpp" + } + }, + "/_api/index": { + "get": { + "description": "\n\n
Returns an object with an attribute indexes containing an array of all index descriptions for the given collection. The same information is also available in the identifiers as an object with the index handles as keys.

Example: Return information about all indexes

shell> curl --dump - http://localhost:8529/_api/index?collection=products\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"indexes\" : [ \n    { \n      \"id\" : \"products/0\", \n      \"type\" : \"primary\", \n      \"fields\" : [ \n        \"_key\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : true, \n      \"sparse\" : false \n    }, \n    { \n      \"id\" : \"products/758438343\", \n      \"type\" : \"hash\", \n      \"fields\" : [ \n        \"name\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : false, \n      \"sparse\" : false \n    }, \n    { \n      \"id\" : \"products/758700487\", \n      \"type\" : \"skiplist\", \n      \"fields\" : [ \n        \"price\" \n      ], \n      \"unique\" : false, \n      \"sparse\" : true \n    } \n  ], \n  \"identifiers\" : { \n    \"products/0\" : { \n      \"id\" : \"products/0\", \n      \"type\" : \"primary\", \n      \"fields\" : [ \n        \"_key\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : true, \n      \"sparse\" : false \n    }, \n    \"products/758438343\" : { \n      \"id\" : \"products/758438343\", \n      \"type\" : \"hash\", \n      \"fields\" : [ \n        \"name\" \n      ], \n      \"selectivityEstimate\" : 1, \n      \"unique\" : false, \n      \"sparse\" : false \n    }, \n    \"products/758700487\" : { \n      \"id\" : \"products/758700487\", \n      \"type\" : \"skiplist\", \n      \"fields\" : [ \n        \"price\" \n      ], \n      \"unique\" : false, \n      \"sparse\" : true \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "returns a json object containing a list of indexes on that collection.
" + } + }, + "summary": " Read all indexes of a collection", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + }, + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute paths. of type string
  • unique: if true, then create a unique index.
  • type: must be equal to \"skiplist\".
  • sparse: if true, then create a sparse index.
\n\n
Creates a skip-list index for the collection collection-name, if it does not already exist. The call expects an object containing the index details.
In a sparse index all documents will be excluded from the index that do not contain at least one of the specified index attributes (i.e. fields) or that have a value of null in any of the specified index attributes. Such documents will not be indexed, and not be taken into account for uniqueness checks if the unique flag is set.
In a non-sparse index, these documents will be indexed (for non-present indexed attributes, a value of null will be used) and will be taken into account for uniqueness checks if the unique flag is set.
Note: unique indexes on non-shard keys are not supported in a cluster.

Example: Creating a skiplist index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"skiplist\", \n  \"unique\" : false, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/761715143\", \n  \"type\" : \"skiplist\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a sparse skiplist index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"skiplist\", \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"fields\" : [ \n    \"a\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/763222471\", \n  \"type\" : \"skiplist\", \n  \"fields\" : [ \n    \"a\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.

", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_skiplist" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "400": { + "description": "If the collection already contains documents and you try to create a unique skip-list index in such a way that there are documents violating the uniqueness, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create skip list", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#CapConstraints": { + "post": { + "description": "**A json post document with these Properties is required:**
  • byteSize: The maximal size of the active document data in the collection (in bytes). If specified, the value must be at least 16384.

  • type: must be equal to \"cap\".
  • size: The maximal number of documents for the collection. If specified, the value must be greater than zero.
\n\nNOTE Swagger examples won't work due to the anchor.


Creates a cap constraint for the collection collection-name, if it does not already exist. Expects an object containing the index details.
Note: The cap constraint does not index particular attributes of the documents in a collection, but limits the number of documents in the collection to a maximum value. The cap constraint thus does not support attribute names specified in the fields attribute nor uniqueness of any kind via the unique attribute.
It is allowed to specify either size or byteSize, or both at the same time. If both are specified, then the automatic document removal will be triggered by the first non-met constraint.

Example: Creating a cap constraint

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"cap\", \n  \"size\" : 10 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/760142279\", \n  \"type\" : \"cap\", \n  \"size\" : 10, \n  \"byteSize\" : 0, \n  \"unique\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_cap" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then an HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then an HTTP 201 is returned.
" + }, + "400": { + "description": "If either size or byteSize contain invalid values, then an HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create cap constraint", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#fulltext": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute names. Currently, the array is limited to exactly one attribute. of type string
  • type: must be equal to \"fulltext\".
  • minLength: Minimum character length of words to index. Will default to a server-defined value if unspecified. It is thus recommended to set this value explicitly when creating the index.
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a fulltext index for the collection collection-name, if it does not already exist. The call expects an object containing the index details.

Example: Creating a fulltext index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"fulltext\", \n  \"fields\" : [ \n    \"text\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/760601031\", \n  \"type\" : \"fulltext\", \n  \"fields\" : [ \n    \"text\" \n  ], \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"minLength\" : 2, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_fulltext" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create fulltext index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#general": { + "post": { + "description": "free style json body\n\nNOTE Swagger examples won't work due to the anchor.

Creates a new index in the collection collection. Expects an object containing the index details.
The type of the index to be created must be specified in the type attribute of the index details. Depending on the index type, additional attributes may need to be specified in the request in order to create the index.
Most indexes (a notable exception being the cap constraint) require the array of attributes to be indexed in the fields attribute of the index details. Depending on the index type, a single attribute or multiple attributes can be indexed.
Indexing system attributes such as _id, _key, _from, and _to is not supported for user-defined indexes. Manually creating an index using any of these attributes will fail with an error.
Some indexes can be created as unique or non-unique variants. Uniqueness can be controlled for most indexes by specifying the unique flag in the index details. Setting it to true will create a unique index. Setting it to false or omitting the unique attribute will create a non-unique index.
Note: The following index types do not support uniqueness, and using the unique attribute with these types may lead to an error:
  • cap constraints
  • fulltext indexes
Note: Unique indexes on non-shard keys are not supported in a cluster.
Hash and skiplist indexes can optionally be created in a sparse variant. A sparse index will be created if the sparse attribute in the index details is set to true. Sparse indexes do not index documents for which any of the index attributes is either not set or is null.
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then an HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then an HTTP 201 is returned.
" + }, + "400": { + "description": "If an invalid index description is posted or attributes are used that the target index will not support, then an HTTP 400 is returned.
" + }, + "404": { + "description": "If collection is unknown, then an HTTP 404 is returned.
" + } + }, + "summary": " Create index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#geo": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: An array with one or two attribute paths.
    If it is an array with one attribute path location, then a geo-spatial index on all documents is created using location as path to the coordinates. The value of the attribute must be an array with at least two double values. The array must contain the latitude (first value) and the longitude (second value). All documents which do not have the attribute path, or whose values are not suitable, are ignored.
    If it is an array with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the attribute longitude must be a double. All documents which do not have the attribute paths, or whose values are not suitable, are ignored. of type string
  • type: must be equal to \"geo\".
  • geoJson: If a geo-spatial index on a location is constructed and geoJson is true, then the order within the array is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a geo-spatial index in the collection collection-name, if it does not already exist. Expects an object containing the index details.
Geo indexes are always sparse, meaning that documents that do not contain the index attributes or have non-numeric values in the index attributes will not be indexed.

Example: Creating a geo index with a location attribute

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"geo\", \n  \"fields\" : [ \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/759749063\", \n  \"type\" : \"geo1\", \n  \"fields\" : [ \n    \"b\" \n  ], \n  \"geoJson\" : false, \n  \"constraint\" : false, \n  \"unique\" : false, \n  \"ignoreNull\" : true, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a geo index with latitude and longitude attributes

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"geo\", \n  \"fields\" : [ \n    \"e\", \n    \"f\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/759290311\", \n  \"type\" : \"geo2\", \n  \"fields\" : [ \n    \"e\", \n    \"f\" \n  ], \n  \"constraint\" : false, \n  \"unique\" : false, \n  \"ignoreNull\" : true, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.

", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_geo" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create geo-spatial index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index#hash": { + "post": { + "description": "**A json post document with these Properties is required:**
  • fields: an array of attribute paths. of type string
  • unique: if true, then create a unique index.
  • type: must be equal to \"hash\".
  • sparse: if true, then create a sparse index.
\n\nNOTE Swagger examples won't work due to the anchor.

Creates a hash index for the collection collection-name if it does not already exist. The call expects an object containing the index details.
In a sparse index all documents will be excluded from the index that do not contain at least one of the specified index attributes (i.e. fields) or that have a value of null in any of the specified index attributes. Such documents will not be indexed, and not be taken into account for uniqueness checks if the unique flag is set.
In a non-sparse index, these documents will be indexed (for non-present indexed attributes, a value of null will be used) and will be taken into account for uniqueness checks if the unique flag is set.
Note: unique indexes on non-shard keys are not supported in a cluster.

Example: Creating a unique constraint

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : true, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/762239431\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : true, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a non-unique hash index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : false, \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/761190855\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\", \n    \"b\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : false, \n  \"sparse\" : false, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Creating a sparse index

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/index?collection=products <<EOF\n{ \n  \"type\" : \"hash\", \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"fields\" : [ \n    \"a\" \n  ] \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/762698183\", \n  \"type\" : \"hash\", \n  \"fields\" : [ \n    \"a\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : false, \n  \"sparse\" : true, \n  \"isNewlyCreated\" : true, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "The collection name.
", + "in": "query", + "name": "collection-name", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_index_hash" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the index already exists, then a HTTP 200 is returned.
" + }, + "201": { + "description": "If the index does not already exist and could be created, then a HTTP 201 is returned.
" + }, + "400": { + "description": "If the collection already contains documents and you try to create a unique hash index in such a way that there are documents violating the uniqueness, then a HTTP 400 is returned.
" + }, + "404": { + "description": "If the collection-name is unknown, then a HTTP 404 is returned.
" + } + }, + "summary": " Create hash index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/index/{index-handle}": { + "delete": { + "description": "\n\n
Deletes an index with index-handle.

Example:

shell> curl -X DELETE --dump - http://localhost:8529/_api/index/products/763746759\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/763746759\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The index handle.
", + "format": "string", + "in": "path", + "name": "index-handle", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the index could be deleted, then an HTTP 200 is returned.
" + }, + "404": { + "description": "If the index-handle is unknown, then an HTTP 404 is returned." + } + }, + "summary": " Delete index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + }, + "get": { + "description": "\n\n
The result is an object describing the index. It has at least the following attributes:
  • id: the identifier of the index
  • type: the index type
All other attributes are type-dependent. For example, some indexes provide unique or sparse flags, whereas others don't. Some indexes also provide a selectivity estimate in the selectivityEstimate attribute of the result.

Example:

shell> curl --dump - http://localhost:8529/_api/index/products/0\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"products/0\", \n  \"type\" : \"primary\", \n  \"fields\" : [ \n    \"_key\" \n  ], \n  \"selectivityEstimate\" : 1, \n  \"unique\" : true, \n  \"sparse\" : false, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The index-handle.
", + "format": "string", + "in": "path", + "name": "index-handle", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "If the index exists, then a HTTP 200 is returned.
" + }, + "404": { + "description": "If the index does not exist, then a HTTP 404 is returned.
" + } + }, + "summary": "Read index", + "tags": [ + "Indexes" + ], + "x-examples": [], + "x-filename": "Indexes - js/actions/api-index.js" + } + }, + "/_api/job/{job-id}": { + "get": { + "description": "\n\nReturns the processing status of the specified job. The processing status can be determined by peeking into the HTTP response code of the response.

Example: Querying the status of a done job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603314631\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/603314631\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 603314631\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Querying the status of a pending job: (we create a sleep job therefore...)

shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_admin/sleep?duration=30\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603380167\n\nshell> curl --dump - http://localhost:8529/_api/job/603380167\n\nHTTP/1.1 204 No Content\ncontent-type: text/plain; charset=utf-8\n\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the job requested via job-id has been executed and its result is ready to fetch.
" + }, + "204": { + "description": "is returned if the job requested via job-id is still in the queue of pending (or not yet finished) jobs.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list.
" + } + }, + "summary": " Returns async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + }, + "put": { + "description": "\n\nReturns the result of an async job identified by job-id. If the async job result is present on the server, the result will be removed from the list of result. That means this method can be called for each job-id once. The method will return the original job result's headers and body, plus the additional HTTP header x-arango-async-job-id. If this header is present, then the job was found and the response contains the original job's result. If the header is not present, the job was not found and the response contains status information from the job manager.

Example: Not providing a job-id:

shell> curl -X PUT --dump - http://localhost:8529/_api/job\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"bad parameter\", \n  \"code\" : 400, \n  \"errorNum\" : 400 \n}\n

\n
Example: Providing a job-id for a non-existing job:

shell> curl -X PUT --dump - http://localhost:8529/_api/job/notthere\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"not found\", \n  \"code\" : 404, \n  \"errorNum\" : 404 \n}\n

\n
Example: Fetching the result of an HTTP GET job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602986951\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/602986951\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 602986951\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Fetching the result of an HTTP POST job that failed:

shell> curl -X PUT --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/collection <<EOF\n{ \n  \"name\" : \" this name is invalid \" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603052487\n\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/603052487\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\nx-arango-async-id: 603052487\n\n{ \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 400, \n  \"errorMessage\" : \"expected PUT /_api/collection/<collection-name>/<action>\" \n}\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the job requested via job-id is still in the queue of pending (or not yet finished) jobs. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "400": { + "description": "is returned if no job-id was specified in the request. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list. In this case, no x-arango-async-id HTTP header will be returned.
" + } + }, + "summary": " Return result of an async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/job/{job-id}/cancel": { + "put": { + "description": "\n\nCancels the currently running job identified by job-id. Note that it still might take some time to actually cancel the running async job.

Example:

shell> curl -X POST --header 'x-arango-async: store' --data-binary @- --dump - http://localhost:8529/_api/cursor <<EOF\n{ \n  \"query\" : \"FOR i IN 1..10 FOR j IN 1..10 LET x = sleep(1.0) FILTER i == 5 && j == 5 RETURN 42\" \n}\nEOF\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602659271\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"602659271\" \n]\nshell> curl -X PUT --dump - http://localhost:8529/_api/job/602659271/cancel\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
", + "parameters": [ + { + "description": "The async job id.
", + "format": "string", + "in": "path", + "name": "job-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "cancel has been initiated.
" + }, + "400": { + "description": "is returned if no job-id was specified in the request. In this case, no x-arango-async-id HTTP header will be returned.
" + }, + "404": { + "description": "is returned if the job was not found or already deleted or fetched from the job result list. In this case, no x-arango-async-id HTTP header will be returned.
" + } + }, + "summary": " Cancel async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/job/{type}": { + "delete": { + "description": "\n\nDeletes either all job results, expired job results, or the result of a specific job. Clients can use this method to perform an eventual garbage collection of job results.

Example: Deleting all jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602790343\n\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/all\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
Example: Deleting expired jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602855879\n\nshell> curl --dump - http://localhost:8529/_admin/time\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"time\" : 1443627576.40017, \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/expired?stamp=1443627576.40017\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
Example: Deleting the result of a specific job:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 602921415\n\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/602921415\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
Example: Deleting the result of a non-existing job:

shell> curl -X DELETE --dump - http://localhost:8529/_api/job/AreYouThere\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"not found\", \n  \"code\" : 404, \n  \"errorNum\" : 404 \n}\n

\n
", + "parameters": [ + { + "description": "The type of jobs to delete. type can be: *all: Deletes all jobs results. Currently executing or queued async jobs will not be stopped by this call. *expired: Deletes expired results. To determine the expiration status of a result, pass the stamp URL parameter. stamp needs to be a UNIX timestamp, and all async job results created at a lower timestamp will be deleted. *an actual job-id: In this case, the call will remove the result of the specified async job. If the job is currently executing or queued, it will not be aborted.
", + "format": "string", + "in": "path", + "name": "type", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the deletion operation was carried out successfully. This code will also be returned if no results were deleted.
" + }, + "400": { + "description": "is returned if type is not specified or has an invalid value.
" + }, + "404": { + "description": "is returned if type is a job-id but no async job with the specified id was found.
" + } + }, + "summary": " Deletes async job", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + }, + "get": { + "description": "\n\nReturns the list of ids of async jobs with a specific status (either done or pending). The list can be used by the client to get an overview of the job system status and to retrieve completed job results later.

Example: Fetching the list of done jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603118023\n\nshell> curl --dump - http://localhost:8529/_api/job/done\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"603118023\" \n]\n

\n
Example: Fetching the list of pending jobs:

shell> curl -X PUT --header 'x-arango-async: store' --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603183559\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ ]\n

\n
Example: Querying the status of a pending job: (we create a sleep job therefore...)

shell> curl --header 'x-arango-async: store' --dump - http://localhost:8529/_admin/sleep?duration=30\n\nHTTP/1.1 202 Accepted\ncontent-type: text/plain; charset=utf-8\nx-arango-async-id: 603249095\n\nshell> curl --dump - http://localhost:8529/_api/job/pending\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  \"603249095\" \n]\nshell> curl -X DELETE --dump - http://localhost:8529/_api/job/603249095\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : true \n}\n

\n
", + "parameters": [ + { + "description": "The type of jobs to return. The type can be either done or pending. Setting the type to done will make the method return the ids of already completed async jobs for which results can be fetched. Setting the type to pending will return the ids of not yet finished async jobs.
", + "format": "string", + "in": "path", + "name": "type", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the list can be compiled successfully. Note: the list might be empty.
" + }, + "400": { + "description": "is returned if type is not specified or has an invalid value.
" + } + }, + "summary": " Returns list of async jobs", + "tags": [ + "job" + ], + "x-examples": [], + "x-filename": "job - arangod/HttpServer/AsyncJobManager.cpp, arangod/RestHandler/RestJobHandler.cpp" + } + }, + "/_api/query": { + "post": { + "description": "**A json post document with these Properties is required:**
  • query: To validate a query string without executing it, the query string can be passed to the server via an HTTP POST request.

Example: a Valid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/query <<EOF\n{ \"query\" : \"FOR p IN products FILTER p.name == @name LIMIT 2 RETURN p.n\" }\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200, \n  \"parsed\" : true, \n  \"collections\" : [ \n    \"products\" \n  ], \n  \"bindVars\" : [ \n    \"name\" \n  ], \n  \"ast\" : [ \n    { \n      \"type\" : \"root\", \n      \"subNodes\" : [ \n        { \n          \"type\" : \"for\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"variable\", \n              \"name\" : \"p\", \n              \"id\" : 0 \n            }, \n            { \n              \"type\" : \"collection\", \n              \"name\" : \"products\" \n            } \n          ] \n        }, \n        { \n          \"type\" : \"filter\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"compare ==\", \n              \"subNodes\" : [ \n                { \n                  \"type\" : \"attribute access\", \n                  \"name\" : \"name\", \n                  \"subNodes\" : [ \n                    { \n                      \"type\" : \"reference\", \n                      \"name\" : \"p\", \n                      \"id\" : 0 \n                    } \n                  ] \n                }, \n                { \n                  \"type\" : \"parameter\", \n                  \"name\" : \"name\" \n                } \n              ] \n            } \n          ] \n        }, \n        { \n          \"type\" : \"limit\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"value\", \n              \"value\" : 0 \n            }, \n            { \n              \"type\" : \"value\", \n              \"value\" : 2 \n            } \n          ] \n        }, \n        { \n          \"type\" : \"return\", \n          \"subNodes\" : [ \n            { \n              \"type\" : \"attribute access\", \n 
             \"name\" : \"n\", \n              \"subNodes\" : [ \n                { \n                  \"type\" : \"reference\", \n                  \"name\" : \"p\", \n                  \"id\" : 0 \n                } \n              ] \n            } \n          ] \n        } \n      ] \n    } \n  ], \n  \"warnings\" : [ ] \n}\n

\n
Example: an Invalid query

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/query <<EOF\n{ \"query\" : \"FOR p IN products FILTER p.name = @name LIMIT 2 RETURN p.n\" }\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"errorMessage\" : \"syntax error, unexpected assignment near '= @name LIMIT 2 RETURN p.n' at position 1:33\", \n  \"code\" : 400, \n  \"errorNum\" : 1501 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PostApiQueryProperties" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the query is valid, the server will respond with HTTP 200 and return the names of the bind parameters it found in the query (if any) in the bindVars attribute of the response. It will also return an array of the collections used in the query in the collections attribute. If a query can be parsed successfully, the ast attribute of the returned JSON will contain the abstract syntax tree representation of the query. The format of the ast is subject to change in future versions of ArangoDB, but it can be used to inspect how ArangoDB interprets a given query. Note that the abstract syntax tree will be returned without any optimizations applied to it.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request, or if the query contains a parse error. The body of the response will contain the error details embedded in a JSON object.
" + } + }, + "summary": " Parse an AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query-cache": { + "delete": { + "description": "\n\nclears the query cache", + "parameters": [], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the cache was cleared successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + } + }, + "summary": " Clears any results in the AQL query cache", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query-cache/properties": { + "get": { + "description": "\n\nReturns the global AQL query cache configuration. The configuration is a JSON object with the following properties:
  • mode: the mode the AQL query cache operates in. The mode is one of the following values: off, on or demand.
  • maxResults: the maximum number of query results that will be stored per database-specific cache.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if the properties can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the global properties for the AQL query cache", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put": { + "description": "\n\nAfter the properties have been changed, the current set of properties will be returned in the HTTP response.
Note: changing the properties may invalidate all results in the cache. The global properties for AQL query cache. The properties need to be passed in the attribute properties in the body of the HTTP request. properties needs to be a JSON object with the following properties:
**A json post document with these Properties is required:**
  • mode: the mode the AQL query cache should operate in. Possible values are off, on or demand.
  • maxResults: the maximum number of query results that will be stored per database-specific cache.

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PutApiQueryCacheProperties" + }, + "x-description-offset": 489 + } + ], + "responses": { + "200": { + "description": "Is returned if the properties were changed successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Globally adjusts the AQL query result cache properties", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/current": { + "get": { + "description": "\n\nReturns an array containing the AQL queries currently running in the selected database. Each query is a JSON object with the following attributes:
  • id: the query's id
  • query: the query string (potentially truncated)
  • started: the date and time when the query was started
  • runTime: the query's run time up to the point the list of queries was queried
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned when the list of queries can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the currently running AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/properties": { + "get": { + "description": "\n\nReturns the current query tracking configuration. The configuration is a JSON object with the following properties:
  • enabled: if set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
  • trackSlowQueries: if set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
  • maxSlowQueries: the maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
  • slowQueryThreshold: the threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
  • maxQueryStringLength: the maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned if properties were retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the properties for the AQL query tracking", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • slowQueryThreshold: The threshold value for treating a query as slow. A query with a runtime greater or equal to this threshold value will be put into the list of slow queries when slow query tracking is enabled. The value for slowQueryThreshold is specified in seconds.
  • enabled: If set to true, then queries will be tracked. If set to false, neither queries nor slow queries will be tracked.
  • maxSlowQueries: The maximum number of slow queries to keep in the list of slow queries. If the list of slow queries is full, the oldest entry in it will be discarded when additional slow queries occur.
  • trackSlowQueries: If set to true, then slow queries will be tracked in the list of slow queries if their runtime exceeds the value set in slowQueryThreshold. In order for slow queries to be tracked, the enabled property must also be set to true.
  • maxQueryStringLength: The maximum query string length to keep in the list of queries. Query strings can have arbitrary lengths, and this property can be used to save memory in case very long query strings are used. The value is specified in bytes.
\n\nThe properties need to be passed in the attribute properties in the body of the HTTP request. properties needs to be a JSON object.
After the properties have been changed, the current set of properties will be returned in the HTTP response.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/PutApiQueryProperties" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "Is returned if the properties were changed successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Changes the properties for the AQL query tracking", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/slow": { + "delete": { + "description": "\n\nClears the list of slow AQL queries
", + "parameters": [], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the list of queries was cleared successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + } + }, + "summary": " Clears the list of slow AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + }, + "get": { + "description": "\n\nReturns an array containing the last AQL queries that exceeded the slow query threshold in the selected database. The maximum amount of queries in the list can be controlled by setting the query tracking property `maxSlowQueries`. The threshold for treating a query as slow can be adjusted by setting the query tracking property `slowQueryThreshold`.
Each query is a JSON object with the following attributes:
  • id: the query's id
  • query: the query string (potentially truncated)
  • started: the date and time when the query was started
  • runTime: the query's run time up to the point the list of queries was queried
", + "parameters": [], + "responses": { + "200": { + "description": "Is returned when the list of queries can be retrieved successfully.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request,

" + } + }, + "summary": " Returns the list of slow AQL queries", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/query/{query-id}": { + "delete": { + "description": "\n\nKills a running query. The query will be terminated at the next cancelation point.
", + "parameters": [ + { + "description": "The id of the query.
", + "format": "string", + "in": "path", + "name": "query-id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The server will respond with HTTP 200 when the query was still running when the kill request was executed and the query's kill flag was set.
" + }, + "400": { + "description": "The server will respond with HTTP 400 in case of a malformed request.
" + }, + "404": { + "description": "The server will respond with HTTP 404 when no query with the specified id was found.
" + } + }, + "summary": " Kills a running AQL query", + "tags": [ + "AQL" + ], + "x-examples": [], + "x-filename": "AQL - arangod/RestHandler/RestQueryHandler.cpp, js/actions/api-aqlfunction.js, js/actions/api-explain.js, arangod/RestHandler/RestQueryCacheHandler.cpp" + } + }, + "/_api/replication/applier-config": { + "get": { + "description": "\n\nReturns the configuration of the replication applier.
The body of the response is a JSON object with the configuration. The following attributes may be present in the configuration:
  • endpoint: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • database: the name of the database to connect to (e.g. \"_system\").
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • password: the password to use when connecting to the endpoint.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
  • autoStart: whether or not to auto-start the replication applier on (next and following) server starts
  • adaptivePolling: whether or not the replication applier will use adaptive polling.
  • includeSystem: whether or not system collection operations will be applied
  • requireFromPresent: if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • restrictType: the configuration for restrictCollections
  • restrictCollections: the optional array of collections to include or exclude, based on the setting of restrictType

Example:

shell> curl --dump - http://localhost:8529/_api/replication/applier-config\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"requestTimeout\" : 300, \n  \"connectTimeout\" : 10, \n  \"ignoreErrors\" : 0, \n  \"maxConnectRetries\" : 100, \n  \"sslProtocol\" : 0, \n  \"chunkSize\" : 0, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true, \n  \"includeSystem\" : true, \n  \"requireFromPresent\" : false, \n  \"verbose\" : false, \n  \"restrictType\" : \"\", \n  \"restrictCollections\" : [ ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return configuration of replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • database: the name of the database on the endpoint. If not specified, defaults to the current local database name.
  • restrictType: the configuration for restrictCollections; Has to be either include or exclude
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • requireFromPresent: if set to true, then the replication applier will check at start whether the start tick from which it starts or resumes replication is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • autoStart: whether or not to auto-start the replication applier on (next and following) server starts
  • adaptivePolling: if set to true, the replication applier will fall to sleep for an increasingly long period in case the logger server at the endpoint does not have any more replication events to apply. Using adaptive polling is thus useful to reduce the amount of work for both the applier and the logger server for cases when there are only infrequent changes. The downside is that when using adaptive polling, it might take longer for the replication applier to detect that there are new replication events on the logger server.
    Setting adaptivePolling to false will make the replication applier contact the logger server in a constant interval, regardless of whether the logger server provides updates frequently or seldom.
  • password: the password to use when connecting to the endpoint.
  • restrictCollections: the array of collections to include or exclude, based on the setting of restrictType of type string
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
\n\nSets the configuration of the replication applier. The configuration can only be changed while the applier is not running. The updated configuration will be saved immediately but only become active with the next start of the applier.
In case of success, the body of the response is a JSON object with the updated configuration.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/replication/applier-config <<EOF\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"username\" : \"replicationApplier\", \n  \"password\" : \"applier1234@foxx\", \n  \"chunkSize\" : 4194304, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\", \n  \"username\" : \"replicationApplier\", \n  \"requestTimeout\" : 300, \n  \"connectTimeout\" : 10, \n  \"ignoreErrors\" : 0, \n  \"maxConnectRetries\" : 100, \n  \"sslProtocol\" : 0, \n  \"chunkSize\" : 4194304, \n  \"autoStart\" : false, \n  \"adaptivePolling\" : true, \n  \"includeSystem\" : true, \n  \"requireFromPresent\" : false, \n  \"verbose\" : false, \n  \"restrictType\" : \"\", \n  \"restrictCollections\" : [ ] \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_applier_adjust" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed, or if the replication applier is currently running.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Adjust configuration of replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-start": { + "put": { + "description": "\n\nStarts the replication applier. This will return immediately if the replication applier is already running.
If the replication applier is not already running, the applier configuration will be checked, and if it is complete, the applier will be started in a background thread. This means that even if the applier encounters errors while running, they will not be reported in the response to this method.
To detect replication applier errors after the applier was started, use the /_api/replication/applier-state API instead.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/replication/applier-start\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:38:57Z\", \n      \"message\" : \"applier created\", \n      \"failedConnects\" : 0 \n    }, \n    \"totalRequests\" : 0, \n    \"totalFailedConnects\" : 0, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"errorNum\" : 0 \n    }, \n    \"time\" : \"2015-09-30T15:40:09Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [ + { + "description": "The remote lastLogTick value from which to start applying. If not specified, the last saved tick from the previous applier run is used. If there is no previous applier state saved, the applier will start at the beginning of the logger server's log.
", + "in": "query", + "name": "from", + "required": false, + "type": "string" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the replication applier is not fully configured or the configuration is invalid.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Start replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-state": { + "get": { + "description": "\n\nReturns the state of the replication applier, regardless of whether the applier is currently running or not.
The response is a JSON object with the following attributes:
  • state: a JSON object with the following sub-attributes:
    - running: whether or not the applier is active and running
    - lastAppliedContinuousTick: the last tick value from the continuous replication log the applier has applied.
    - lastProcessedContinuousTick: the last tick value from the continuous replication log the applier has processed.
    Regularly, the last applied and last processed tick values should be identical. For transactional operations, the replication applier will first process incoming log events before applying them, so the processed tick value might be higher than the applied tick value. This will be the case until the applier encounters the transaction commit log event for the transaction.
    - lastAvailableContinuousTick: the last tick value the logger server can provide.
    - time: the time on the applier server.
    - totalRequests: the total number of requests the applier has made to the endpoint.
    - totalFailedConnects: the total number of failed connection attempts the applier has made.
    - totalEvents: the total number of log events the applier has processed.
    - totalOperationsExcluded: the total number of log events excluded because of restrictCollections.
    - progress: a JSON object with details about the replication applier progress. It contains the following sub-attributes if there is progress to report:
    - message: a textual description of the progress
    - time: the date and time the progress was logged
    - failedConnects: the current number of failed connection attempts
    - lastError: a JSON object with details about the last error that happened on the applier. It contains the following sub-attributes if there was an error:
    - errorNum: a numerical error code
    - errorMessage: a textual error description
    - time: the date and time the error occurred
    In case no error has occurred, lastError will be empty.
  • server: a JSON object with the following sub-attributes:
    - version: the applier server's version
    - serverId: the applier server's id
  • endpoint: the endpoint the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
  • database: the name of the database the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)

Example: Fetching the state of an inactive applier:

shell> curl --dump - http://localhost:8529/_api/replication/applier-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : false, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:09Z\", \n      \"message\" : \"applier shut down\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 1, \n    \"totalFailedConnects\" : 1, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"errorMessage\" : \"could not connect to master at tcp://127.0.0.1:8529: Could not connect to 'tcp://127.0.0.1:8529' 'connect() failed with #111 - Connection refused'\", \n      \"errorNum\" : 1412 \n    }, \n    \"time\" : \"2015-09-30T15:40:10Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
Example: Fetching the state of an active applier:

shell> curl --dump - http://localhost:8529/_api/replication/applier-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"message\" : \"fetching master state information\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 2, \n    \"totalFailedConnects\" : 2, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"errorNum\" : 0 \n    }, \n    \"time\" : \"2015-09-30T15:40:10Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " State of the replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/applier-stop": { + "put": { + "description": "\n\nStops the replication applier. This will return immediately if the replication applier is not running.

Example:

shell> curl -X PUT --dump - http://localhost:8529/_api/replication/applier-stop\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : false, \n    \"lastAppliedContinuousTick\" : null, \n    \"lastProcessedContinuousTick\" : null, \n    \"lastAvailableContinuousTick\" : null, \n    \"safeResumeTick\" : null, \n    \"progress\" : { \n      \"time\" : \"2015-09-30T15:40:10Z\", \n      \"message\" : \"applier shut down\", \n      \"failedConnects\" : 1 \n    }, \n    \"totalRequests\" : 3, \n    \"totalFailedConnects\" : 3, \n    \"totalEvents\" : 0, \n    \"totalOperationsExcluded\" : 0, \n    \"lastError\" : { \n      \"time\" : \"2015-09-30T15:40:11Z\", \n      \"errorMessage\" : \"could not connect to master at tcp://127.0.0.1:8529: Could not connect to 'tcp://127.0.0.1:8529' 'connect() failed with #111 - Connection refused'\", \n      \"errorNum\" : 1412 \n    }, \n    \"time\" : \"2015-09-30T15:40:11Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"endpoint\" : \"tcp://127.0.0.1:8529\", \n  \"database\" : \"_system\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Stop replication applier", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/batch": { + "post": { + "description": "**A json post document with these Properties is required:**
  • ttl: the time-to-live for the new batch (in seconds)
    A JSON object with the batch configuration.
\n\nCreates a new dump batch and returns the batch's id.
The response is a JSON object with the following attributes:
  • id: the id of the batch
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_batch_replication" + }, + "x-description-offset": 59 + } + ], + "responses": { + "204": { + "description": "is returned if the batch was created successfully.
" + }, + "400": { + "description": "is returned if the ttl value is invalid or if DBserver attribute is not specified or illegal on a coordinator.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Create new dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/batch/{id}": { + "delete": { + "description": "\n\nDeletes the existing dump batch, allowing compaction and cleanup to resume.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "description": "The id of the batch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the batch was deleted successfully.
" + }, + "400": { + "description": "is returned if the batch was not found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Deletes an existing dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • ttl: the time-to-live for the new batch (in seconds)
\n\nExtends the ttl of an existing dump batch, using the batch's id and the provided ttl value.
If the batch's ttl can be extended successfully, the response is empty.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_batch_replication" + }, + "x-description-offset": 59 + }, + { + "description": "The id of the batch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "204": { + "description": "is returned if the batch's ttl was extended successfully.
" + }, + "400": { + "description": "is returned if the ttl value is invalid or the batch was not found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + } + }, + "summary": " Prolong existing dump batch", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/clusterInventory": { + "get": { + "description": "\n\nReturns the array of collections and indexes available on the cluster.
The response will be an array of JSON objects, one for each collection. Each collection contains exactly two keys \"parameters\" and \"indexes\". This information comes from Plan/Collections/{DB-Name}/* in the agency, just that the indexes attribute there is relocated to adjust it to the data format of arangodump.
", + "parameters": [ + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return cluster inventory of collections and indexes", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/dump": { + "get": { + "description": "\n\nReturns the data from the collection for the requested range.
When the from URL parameter is not used, collection events are returned from the beginning. When the from parameter is used, the result will only contain collection entries which have higher tick values than the specified from value (note: the log entry with a tick value equal to from will be excluded).
The to URL parameter can be used to optionally restrict the upper bound of the result to a certain tick value. If used, the result will only contain collection entries with tick values up to (including) to.
The chunkSize URL parameter can be used to control the size of the result. It must be specified in bytes. The chunkSize value will only be honored approximately. Otherwise a too low chunkSize value could cause the server to not be able to put just one entry into the result and return it. Therefore, the chunkSize value will only be consulted after an entry has been written into the result. If the result size is then bigger than chunkSize, the server will respond with as many entries as there are in the response already. If the result size is still smaller than chunkSize, the server will try to return more data if there's more data left to return.
If chunkSize is not specified, some server-side default value will be used.
The Content-Type of the result is application/x-arango-dump. This is an easy-to-process format, with all entries going onto separate lines in the response body.
Each line itself is a JSON object, with at least the following attributes:
  • tick: the operation's tick attribute
  • key: the key of the document/edge or the key used in the deletion operation
  • rev: the revision id of the document/edge or the deletion operation
  • data: the actual document/edge data for types 2300 and 2301. The full document/edge data will be returned even for updates.
  • type: the type of entry. Possible values for type are:
    - 2300: document insertion/update
    - 2301: edge insertion/update
    - 2302: document/edge deletion
Note: there will be no distinction between inserts and updates when calling this method.

Example: Empty collection:

shell> curl --dump - http://localhost:8529/_api/replication/dump?collection=testCollection\n\nHTTP/1.1 204 No Content\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-checkmore: false\nx-arango-replication-lastincluded: 0\n\n

\n
Example: Non-empty collection:

shell> curl --dump - http://localhost:8529/_api/replication/dump?collection=testCollection\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-checkmore: false\nx-arango-replication-lastincluded: 766695879\n\n\"{\\\"tick\\\":\\\"766040519\\\",\\\"type\\\":2300,\\\"key\\\":\\\"123456\\\",\\\"rev\\\":\\\"765974983\\\",\\\"data\\\":{\\\"_key\\\":\\\"123456\\\",\\\"_rev\\\":\\\"765974983\\\",\\\"c\\\":false,\\\"b\\\":1,\\\"d\\\":\\\"additional value\\\"}}\\n{\\\"tick\\\":\\\"766499271\\\",\\\"type\\\":2302,\\\"key\\\":\\\"foobar\\\",\\\"rev\\\":\\\"766433735\\\"}\\n{\\\"tick\\\":\\\"766695879\\\",\\\"type\\\":2302,\\\"key\\\":\\\"abcdef\\\",\\\"rev\\\":\\\"766630343\\\"}\\n\"\n

\n
", + "parameters": [ + { + "description": "The name or id of the collection to dump.
", + "in": "query", + "name": "collection", + "required": true, + "type": "string" + }, + { + "description": "Lower bound tick value for results.
", + "in": "query", + "name": "from", + "required": false, + "type": "number" + }, + { + "description": "Upper bound tick value for results.
", + "in": "query", + "name": "to", + "required": false, + "type": "number" + }, + { + "description": "Approximate maximum size of the returned result.
", + "in": "query", + "name": "chunkSize", + "required": false, + "type": "number" + }, + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to include tick values in the dump. The default value is true.
", + "in": "query", + "name": "ticks", + "required": false, + "type": "boolean" + }, + { + "description": "Whether or not to flush the WAL before dumping. The default value is true.
", + "in": "query", + "name": "flush", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully and data was returned. The header `x-arango-replication-lastincluded` is set to the tick of the last document returned.
" + }, + "204": { + "description": "is returned if the request was executed successfully, but there was no content available. The header `x-arango-replication-lastincluded` is `0` in this case.
" + }, + "400": { + "description": "is returned if either the from or to values are invalid.
" + }, + "404": { + "description": "is returned when the collection could not be found.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return data of a collection", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/inventory": { + "get": { + "description": "\n\nReturns the array of collections and indexes available on the server. This array can be used by replication clients to initiate an initial sync with the server.
The response will contain a JSON object with the collections, state and tick attributes.
collections is an array of collections with the following sub-attributes:
  • parameters: the collection properties
  • indexes: an array of the indexes of the collection. Primary indexes and edge indexes are not included in this array.
The state attribute contains the current state of the replication logger. It contains the following sub-attributes:
  • running: whether or not the replication logger is currently active. Note: since ArangoDB 2.2, the value will always be true
  • lastLogTick: the value of the last tick the replication logger has written
  • time: the current time on the server
Replication clients should note the lastLogTick value returned. They can then fetch collections' data using the dump method up to the value of lastLogTick, and query the continuous replication log for log events after this tick value.
To create a full copy of the collections on the server, a replication client can execute these steps:
  • call the /inventory API method. This returns the lastLogTick value and the array of collections and indexes from the server.
  • for each collection returned by /inventory, create the collection locally and call /dump to stream the collection data to the client, up to the value of lastLogTick. After that, the client can create the indexes on the collections as they were reported by /inventory.
If the clients wants to continuously stream replication log events from the logger server, the following additional steps need to be carried out:
  • the client should call /logger-follow initially to fetch the first batch of replication events that were logged after the client's call to /inventory.
    The call to /logger-follow should use a from parameter with the value of the lastLogTick as reported by /inventory. The call to /logger-follow will return the x-arango-replication-lastincluded which will contain the last tick value included in the response.
  • the client can then continuously call /logger-follow to incrementally fetch new replication events that occurred after the last transfer.
    Calls should use a from parameter with the value of the x-arango-replication-lastincluded header of the previous response. If there are no more replication events, the response will be empty and clients can go to sleep for a while and try again later.
Note: on a coordinator, this request must have the URL parameter DBserver which must be an ID of a DBserver. The very same request is forwarded synchronously to that DBserver. It is an error if this attribute is not bound in the coordinator case.

Example:

shell> curl --dump - http://localhost:8529/_api/replication/inventory\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"7199175\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_apps\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"7461319\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"mount\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"4446663\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_aqlfunctions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2087367\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_graphs\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2218439\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_modules\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, 
\n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2349511\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 33554432, \n        \"name\" : \"_routing\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14145991\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_sessions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14866887\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_system_users_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"252359\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"580039\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"user\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : true \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, 
\n        \"cid\" : \"22206919\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"animals\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"21354951\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"demo\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    } \n  ], \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"767351239\", \n    \"totalEvents\" : 4726, \n    \"time\" : \"2015-09-30T15:40:13Z\" \n  }, \n  \"tick\" : \"767351239\" \n}\n

\n
Example: With some additional indexes:

shell> curl --dump - http://localhost:8529/_api/replication/inventory\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"collections\" : [ \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"7199175\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_apps\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"7461319\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"mount\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"4446663\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_aqlfunctions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2087367\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_graphs\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2218439\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_modules\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, 
\n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"2349511\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 33554432, \n        \"name\" : \"_routing\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14145991\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_sessions\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"14866887\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"_system_users_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"252359\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 4194304, \n        \"name\" : \"_users\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"580039\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"user\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : true, \n          \"sparse\" : true \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, 
\n        \"cid\" : \"22206919\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"animals\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"21354951\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"demo\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"767416775\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"IndexedCollection1\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"767678919\", \n          \"type\" : \"hash\", \n          \"fields\" : [ \n            \"name\" \n          ], \n          \"selectivityEstimate\" : 1, \n          \"unique\" : false, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"768006599\", \n          \"type\" : \"skiplist\", \n          \"fields\" : [ \n            \"a\", \n            \"b\" \n          ], \n          \"unique\" : true, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"768203207\", \n          \"type\" : \"cap\", \n          \"size\" : 500, \n          \"byteSize\" : 0, \n          \"unique\" : false \n        } \n      ] \n    }, \n    { \n      \"parameters\" : { \n        \"version\" : 5, \n        \"type\" : 2, \n        \"cid\" : \"768399815\", \n        \"indexBuckets\" : 8, \n        \"deleted\" : 
false, \n        \"doCompact\" : true, \n        \"maximalSize\" : 1048576, \n        \"name\" : \"IndexedCollection2\", \n        \"isVolatile\" : false, \n        \"waitForSync\" : false \n      }, \n      \"indexes\" : [ \n        { \n          \"id\" : \"768596423\", \n          \"type\" : \"fulltext\", \n          \"fields\" : [ \n            \"text\" \n          ], \n          \"unique\" : false, \n          \"sparse\" : true, \n          \"minLength\" : 10 \n        }, \n        { \n          \"id\" : \"768924103\", \n          \"type\" : \"skiplist\", \n          \"fields\" : [ \n            \"a\" \n          ], \n          \"unique\" : false, \n          \"sparse\" : false \n        }, \n        { \n          \"id\" : \"769120711\", \n          \"type\" : \"cap\", \n          \"size\" : 0, \n          \"byteSize\" : 1048576, \n          \"unique\" : false \n        } \n      ] \n    } \n  ], \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"767351239\", \n    \"totalEvents\" : 4739, \n    \"time\" : \"2015-09-30T15:40:13Z\" \n  }, \n  \"tick\" : \"769251783\" \n}\n

\n
", + "parameters": [ + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return inventory of collections and indexes", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-first-tick": { + "get": { + "description": "\n\nReturns the first available tick value that can be served from the server's replication log. This method can be called by replication clients after to determine if certain data (identified by a tick value) is still available for replication.
The result is a JSON object containing the attribute firstTick. This attribute contains the minimum tick value available in the server's replication log.
Note: this method is not supported on a coordinator in a cluster.

Example: Returning the first available tick

shell> curl --dump - http://localhost:8529/_api/replication/logger-first-tick\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n\"{\\\"firstTick\\\":\\\"383431\\\"}\"\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Returns the first available tick value", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-follow": { + "get": { + "description": "\n\nReturns data from the server's replication log. This method can be called by replication clients after an initial synchronization of data. The method will return all \"recent\" log entries from the logger server, and the clients can replay and apply these entries locally so they get to the same data state as the logger server.
Clients can call this method repeatedly to incrementally fetch all changes from the logger server. In this case, they should provide the from value so they will only get returned the log events since their last fetch.
When the from URL parameter is not used, the logger server will return log entries starting at the beginning of its replication log. When the from parameter is used, the logger server will only return log entries which have higher tick values than the specified from value (note: the log entry with a tick value equal to from will be excluded). Use the from value when incrementally fetching log data.
The to URL parameter can be used to optionally restrict the upper bound of the result to a certain tick value. If used, the result will contain only log events with tick values up to (including) to. In incremental fetching, there is no need to use the to parameter. It only makes sense in special situations, when only parts of the change log are required.
The chunkSize URL parameter can be used to control the size of the result. It must be specified in bytes. The chunkSize value will only be honored approximately. Otherwise a too low chunkSize value could cause the server to not be able to put just one log entry into the result and return it. Therefore, the chunkSize value will only be consulted after a log entry has been written into the result. If the result size is then bigger than chunkSize, the server will respond with as many log entries as there are in the response already. If the result size is still smaller than chunkSize, the server will try to return more data if there's more data left to return.
If chunkSize is not specified, some server-side default value will be used.
The Content-Type of the result is application/x-arango-dump. This is an easy-to-process format, with all log events going onto separate lines in the response body. Each log event itself is a JSON object, with at least the following attributes:
  • tick: the log event tick value
  • type: the log event type
Individual log events will also have additional attributes, depending on the event type. A few common attributes which are used for multiple events types are:
  • cid: id of the collection the event was for
  • tid: id of the transaction the event was contained in
  • key: document key
  • rev: document revision id
  • data: the original document data
A more detailed description of the individual replication event types and their data structures can be found in the manual.
The response will also contain the following HTTP headers:
  • x-arango-replication-active: whether or not the logger is active. Clients can use this flag as an indication for their polling frequency. If the logger is not active and there are no more replication events available, it might be sensible for a client to abort, or to go to sleep for a long time and try again later to check whether the logger has been activated.
  • x-arango-replication-lastincluded: the tick value of the last included value in the result. In incremental log fetching, this value can be used as the from value for the following request. Note that if the result is empty, the value will be 0. This value should not be used as from value by clients in the next request (otherwise the server would return the log events from the start of the log again).
  • x-arango-replication-lasttick: the last tick value the logger server has logged (not necessarily included in the result). By comparing the last tick and last included tick values, clients have an approximate indication of how many events there are still left to fetch.
  • x-arango-replication-checkmore: whether or not there already exists more log data which the client could fetch immediately. If there is more log data available, the client could call logger-follow again with an adjusted from value to fetch remaining log entries until there are no more.
    If there isn't any more log data to fetch, the client might decide to go to sleep for a while before calling the logger again.
Note: this method is not supported on a coordinator in a cluster.

Example: No log events available

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=770628039\n\nHTTP/1.1 204 No Content\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: false\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 0\nx-arango-replication-lasttick: 770628039\n\n

\n
Example: A few log events

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=770628039\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: false\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 771873223\nx-arango-replication-lasttick: 771873223\n\n\"{\\\"tick\\\":\\\"770759111\\\",\\\"type\\\":2000,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"collection\\\":{\\\"version\\\":5,\\\"type\\\":2,\\\"cid\\\":\\\"770693575\\\",\\\"indexBuckets\\\":8,\\\"deleted\\\":false,\\\"doCompact\\\":true,\\\"maximalSize\\\":1048576,\\\"name\\\":\\\"products\\\",\\\"isVolatile\\\":false,\\\"waitForSync\\\":false}}\\n{\\\"tick\\\":\\\"771086791\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"771021255\\\",\\\"data\\\":{\\\"_key\\\":\\\"p1\\\",\\\"_rev\\\":\\\"771021255\\\",\\\"name\\\":\\\"flux compensator\\\"}}\\n{\\\"tick\\\":\\\"771414471\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p2\\\",\\\"rev\\\":\\\"771348935\\\",\\\"data\\\":{\\\"_key\\\":\\\"p2\\\",\\\"_rev\\\":\\\"771348935\\\",\\\"hp\\\":5100,\\\"name\\\":\\\"hybrid hovercraft\\\"}}\\n{\\\"tick\\\":\\\"771611079\\\",\\\"type\\\":2302,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"771545543\\\"}\\n{\\\"tick\\\":\\\"771807687\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p2\\\",\\\"rev\\\":\\\"771742151\\\",\\\"data\\\":{\\\"_key\\\":\\\"p2\\\",\\\"_rev\\\":\\\"771742151\\\"}}\\n{\\\"tick\\\":\\\"771873223\\\",\\\"type\\\":2001,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"770693575\\\"}\\n\"\n

\n
Example: More events than would fit into the response

shell> curl --dump - http://localhost:8529/_api/replication/logger-follow?from=769317319&chunkSize=400\n\nHTTP/1.1 200 OK\ncontent-type: application/x-arango-dump; charset=utf-8\nx-arango-replication-active: true\nx-arango-replication-checkmore: true\nx-arango-replication-frompresent: true\nx-arango-replication-lastincluded: 769841607\nx-arango-replication-lasttick: 770628039\n\n\"{\\\"tick\\\":\\\"769382855\\\",\\\"type\\\":2001,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"768399815\\\"}\\n{\\\"tick\\\":\\\"769513927\\\",\\\"type\\\":2000,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"769448391\\\",\\\"collection\\\":{\\\"version\\\":5,\\\"type\\\":2,\\\"cid\\\":\\\"769448391\\\",\\\"indexBuckets\\\":8,\\\"deleted\\\":false,\\\"doCompact\\\":true,\\\"maximalSize\\\":1048576,\\\"name\\\":\\\"products\\\",\\\"isVolatile\\\":false,\\\"waitForSync\\\":false}}\\n{\\\"tick\\\":\\\"769841607\\\",\\\"type\\\":2300,\\\"database\\\":\\\"121287\\\",\\\"cid\\\":\\\"769448391\\\",\\\"tid\\\":\\\"0\\\",\\\"key\\\":\\\"p1\\\",\\\"rev\\\":\\\"769776071\\\",\\\"data\\\":{\\\"_key\\\":\\\"p1\\\",\\\"_rev\\\":\\\"769776071\\\",\\\"name\\\":\\\"flux compensator\\\"}}\\n\"\n

\n
", + "parameters": [ + { + "description": "Lower bound tick value for results.
", + "in": "query", + "name": "from", + "required": false, + "type": "number" + }, + { + "description": "Upper bound tick value for results.
", + "in": "query", + "name": "to", + "required": false, + "type": "number" + }, + { + "description": "Approximate maximum size of the returned result.
", + "in": "query", + "name": "chunkSize", + "required": false, + "type": "number" + }, + { + "description": "Include system collections in the result. The default value is true.
", + "in": "query", + "name": "includeSystem", + "required": false, + "type": "boolean" + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully, and there are log events available for the requested range. The response body will not be empty in this case.
" + }, + "204": { + "description": "is returned if the request was executed successfully, but there are no log events available for the requested range. The response body will be empty in this case.
" + }, + "400": { + "description": "is returned if either the from or to values are invalid.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Returns log entries", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-state": { + "get": { + "description": "\n\nReturns the current state of the server's replication logger. The state will include information about whether the logger is running and about the last logged tick value. This tick value is important for incremental fetching of data.
The body of the response contains a JSON object with the following attributes:
  • state: the current logger state as a JSON object with the following sub-attributes:
    - running: whether or not the logger is running
    - lastLogTick: the tick value of the latest tick the logger has logged. This value can be used for incremental fetching of log data.
    - totalEvents: total number of events logged since the server was started. The value is not reset between multiple stops and re-starts of the logger.
    - time: the current date and time on the logger server
  • server: a JSON object with the following sub-attributes:
    - version: the logger server's version
    - serverId: the logger server's id
  • clients: returns the last fetch status by replication clients connected to the logger. Each client is returned as a JSON object with the following attributes:
    - serverId: server id of client
    - lastServedTick: last tick value served to this client via the logger-follow API
    - time: date and time when this client last called the logger-follow API

Example: Returns the state of the replication logger.

shell> curl --dump - http://localhost:8529/_api/replication/logger-state\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"state\" : { \n    \"running\" : true, \n    \"lastLogTick\" : \"771873223\", \n    \"totalEvents\" : 4761, \n    \"time\" : \"2015-09-30T15:40:17Z\" \n  }, \n  \"server\" : { \n    \"version\" : \"2.7.0-devel\", \n    \"serverId\" : \"4865533481307\" \n  }, \n  \"clients\" : [ ] \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the logger state could be determined successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if the logger state could not be determined.
" + } + }, + "summary": " Return replication logger state", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/logger-tick-ranges": { + "get": { + "description": "\n\nReturns the currently available ranges of tick values for all currently available WAL logfiles. The tick values can be used to determine if certain data (identified by tick value) are still available for replication.
The body of the response contains a JSON array. Each array member is an object that describes a single logfile. Each object has the following attributes:
*datafile: name of the logfile
*status: status of the datafile, in textual form (e.g. \"sealed\", \"open\")
*tickMin: minimum tick value contained in logfile
*tickMax: maximum tick value contained in logfile

Example: Returns the available tick ranges.

shell> curl --dump - http://localhost:8529/_api/replication/logger-tick-ranges\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-186823.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"383431\", \n    \"tickMax\" : \"642505159\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-642636231.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"642963911\", \n    \"tickMax\" : \"645716423\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-645847495.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"645978567\", \n    \"tickMax\" : \"766695879\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-766826951.db\", \n    \"status\" : \"collected\", \n    \"tickMin\" : \"766958023\", \n    \"tickMax\" : \"767089095\" \n  }, \n  { \n    \"datafile\" : \"/tmp/vocdir.2239/journals/logfile-767220167.db\", \n    \"status\" : \"open\", \n    \"tickMin\" : \"767351239\", \n    \"tickMax\" : \"771873223\" \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the tick ranges could be determined successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if the logger state could not be determined.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Return the tick ranges available in the WAL logfiles", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/make-slave": { + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the master.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • verbose: if set to true, then a log line will be emitted for all operations performed by the replication applier. This should be used for debugging replication problems only.
  • connectTimeout: the timeout (in seconds) when attempting to connect to the endpoint. This value is used for each connection attempt.
  • database: the database name on the master (if not specified, defaults to the name of the local current database).
  • requireFromPresent: if set to true, then the replication applier will check at start of its continuous replication if the start tick from the dump phase is still present on the master. If not, then there would be data loss. If requireFromPresent is true, the replication applier will abort with an appropriate error message. If set to false, then the replication applier will still start, and ignore the data loss.
  • requestTimeout: the timeout (in seconds) for individual requests to the endpoint.
  • restrictType: an optional string value for collection filtering. When specified, the allowed values are include or exclude.
  • restrictCollections: an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be synchronized. If restrictType is exclude, all but the specified collections will be synchronized. of type string
  • adaptivePolling: whether or not the replication applier will use adaptive polling.
  • maxConnectRetries: the maximum number of connection attempts the applier will make in a row. If the applier cannot establish a connection to the endpoint in this number of attempts, it will stop itself.
  • password: the password to use when connecting to the master.
  • chunkSize: the requested maximum size for log transfer packets that is used when the endpoint is contacted.
\n\nStarts a full data synchronization from a remote endpoint into the local ArangoDB database and afterwards starts the continuous replication. The operation works on a per-database level.
All local database data will be removed prior to the synchronization.
In case of success, the body of the response is a JSON object with the following attributes:
  • state: a JSON object with the following sub-attributes:
    - running: whether or not the applier is active and running
    - lastAppliedContinuousTick: the last tick value from the continuous replication log the applier has applied.
    - lastProcessedContinuousTick: the last tick value from the continuous replication log the applier has processed.
    Regularly, the last applied and last processed tick values should be identical. For transactional operations, the replication applier will first process incoming log events before applying them, so the processed tick value might be higher than the applied tick value. This will be the case until the applier encounters the transaction commit log event for the transaction.
    - lastAvailableContinuousTick: the last tick value the logger server can provide.
    - time: the time on the applier server.
    - totalRequests: the total number of requests the applier has made to the endpoint.
    - totalFailedConnects: the total number of failed connection attempts the applier has made.
    - totalEvents: the total number of log events the applier has processed.
    - totalOperationsExcluded: the total number of log events excluded because of restrictCollections.
    - progress: a JSON object with details about the replication applier progress. It contains the following sub-attributes if there is progress to report:
    - message: a textual description of the progress
    - time: the date and time the progress was logged
    - failedConnects: the current number of failed connection attempts
    - lastError: a JSON object with details about the last error that happened on the applier. It contains the following sub-attributes if there was an error:
    - errorNum: a numerical error code
    - errorMessage: a textual error description
    - time: the date and time the error occurred
    In case no error has occurred, lastError will be empty.
  • server: a JSON object with the following sub-attributes:
    - version: the applier server's version
    - serverId: the applier server's id
  • endpoint: the endpoint the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
  • database: the name of the database the applier is connected to (if applier is active) or will connect to (if applier is currently inactive)
WARNING: calling this method will synchronize data from the collections found on the remote master to the local ArangoDB database. All data in the local collections will be purged and replaced with data from the master.
Use with caution!
Please also keep in mind that this command may take a long time to complete and return. This is because it will first do a full data synchronization with the master, which will take time roughly proportional to the amount of data.
Note: this method is not supported on a coordinator in a cluster.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_makeSlave" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred during sychronization or when starting the continuous replication.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Turn the server into a slave of another", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/server-id": { + "get": { + "description": "\n\nReturns the servers id. The id is also returned by other replication API methods, and this method is an easy means of determining a server's id.
The body of the response is a JSON object with the attribute serverId. The server id is returned as a string.

Example:

shell> curl --dump - http://localhost:8529/_api/replication/server-id\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"serverId\" : \"4865533481307\" \n}\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.
" + } + }, + "summary": " Return server id", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/replication/sync": { + "put": { + "description": "**A json post document with these Properties is required:**
  • username: an optional ArangoDB username to use when connecting to the endpoint.
  • includeSystem: whether or not system collection operations will be applied
  • endpoint: the master endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").
  • database: the database name on the master (if not specified, defaults to the name of the local current database).
  • restrictType: an optional string value for collection filtering. When specified, the allowed values are include or exclude.
  • incremental: if set to true, then an incremental synchronization method will be used for synchronizing data in collections. This method is useful when collections already exist locally, and only the remaining differences need to be transferred from the remote endpoint. In this case, the incremental synchronization can be faster than a full synchronization. The default value is false, meaning that the complete data from the remote collection will be transferred.
  • restrictCollections: an optional array of collections for use with restrictType. If restrictType is include, only the specified collections will be synchronized. If restrictType is exclude, all but the specified collections will be synchronized. of type string
  • password: the password to use when connecting to the endpoint.
\n\nStarts a full data synchronization from a remote endpoint into the local ArangoDB database.
The sync method can be used by replication clients to connect an ArangoDB database to a remote endpoint, fetch the remote list of collections and indexes, and collection data. It will thus create a local backup of the state of data at the remote ArangoDB database. sync works on a per-database level.
sync will first fetch the list of collections and indexes from the remote endpoint. It does so by calling the inventory API of the remote database. It will then purge data in the local ArangoDB database, and after start will transfer collection data from the remote database to the local ArangoDB database. It will extract data from the remote database by calling the remote database's dump API until all data are fetched.
In case of success, the body of the response is a JSON object with the following attributes:
  • collections: an array of collections that were transferred from the endpoint
  • lastLogTick: the last log tick on the endpoint at the time the transfer was started. Use this value as the from value when starting the continuous synchronization later.
WARNING: calling this method will synchronize data from the collections found on the remote endpoint to the local ArangoDB database. All data in the local collections will be purged and replaced with data from the endpoint.
Use with caution!
Note: this method is not supported on a coordinator in a cluster.
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_replication_synchronize" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.
" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed.
" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.
" + }, + "500": { + "description": "is returned if an error occurred during sychronization.
" + }, + "501": { + "description": "is returned when this operation is called on a coordinator in a cluster.
" + } + }, + "summary": " Synchronize data from a remote endpoint", + "tags": [ + "Replication" + ], + "x-examples": [], + "x-filename": "Replication - arangod/RestHandler/RestReplicationHandler.cpp" + } + }, + "/_api/simple/all": { + "put": { + "description": "free style json body\n\n
Returns all documents of a collections. The call expects a JSON object as body with the following attributes:
  • collection: The name of the collection to query.
  • skip: The number of documents to skip in the query (optional).
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example: Limit the amount of documents using limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF\n{ \"collection\": \"products\", \"skip\": 2, \"limit\" : 2 }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"Hello3\" : \"World3\", \n      \"_id\" : \"products/774887879\", \n      \"_rev\" : \"774887879\", \n      \"_key\" : \"774887879\" \n    }, \n    { \n      \"Hello4\" : \"World4\", \n      \"_id\" : \"products/775215559\", \n      \"_rev\" : \"775215559\", \n      \"_key\" : \"775215559\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"cached\" : false, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Using a batchSize value

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/all <<EOF\n{ \"collection\": \"products\", \"batchSize\" : 3 }\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"Hello2\" : \"World2\", \n      \"_id\" : \"products/772594119\", \n      \"_rev\" : \"772594119\", \n      \"_key\" : \"772594119\" \n    }, \n    { \n      \"Hello1\" : \"World1\", \n      \"_id\" : \"products/772266439\", \n      \"_rev\" : \"772266439\", \n      \"_key\" : \"772266439\" \n    }, \n    { \n      \"Hello5\" : \"World5\", \n      \"_id\" : \"products/773577159\", \n      \"_rev\" : \"773577159\", \n      \"_key\" : \"773577159\" \n    } \n  ], \n  \"hasMore\" : true, \n  \"id\" : \"773773767\", \n  \"count\" : 5, \n  \"extra\" : { \n    \"stats\" : { \n      \"writesExecuted\" : 0, \n      \"writesIgnored\" : 0, \n      \"scannedFull\" : 5, \n      \"scannedIndex\" : 0, \n      \"filtered\" : 0 \n    }, \n    \"warnings\" : [ ] \n  }, \n  \"cached\" : false, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "description": "Contains the query.
", + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "additionalProperties": {}, + "type": "object" + }, + "x-description-offset": 0 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Return all documents", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/any": { + "put": { + "description": "\n\n
Returns a random document from a collection. The call expects a JSON object as body with the following attributes:
**A json post document with these Properties is required:**
  • collection: The identifier or name of the collection to query.
    Returns a JSON object with the document stored in the attribute document if the collection contains at least one document. If the collection is empty, the document attribute contains null.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/any <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"document\" : { \n    \"_id\" : \"products/776460743\", \n    \"_key\" : \"776460743\", \n    \"_rev\" : \"776460743\", \n    \"Hello2\" : \"World2\" \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_any" + }, + "x-description-offset": 185 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Return a random document", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • skip: The number of documents to skip in the query (optional).
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • example: The example document.
  • collection: The name of the collection to query.
\n\n
This will find all documents matching a given example.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example: Matching an attribute

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"i\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/779082183\", \n      \"_key\" : \"779082183\", \n      \"_rev\" : \"779082183\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 2, \n        \"j\" : 2 \n      } \n    }, \n    { \n      \"_id\" : \"products/778295751\", \n      \"_key\" : \"778295751\", \n      \"_rev\" : \"778295751\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/778885575\", \n      \"_key\" : \"778885575\", \n      \"_rev\" : \"778885575\", \n      \"i\" : 1 \n    }, \n    { \n      \"_id\" : \"products/778623431\", \n      \"_key\" : \"778623431\", \n      \"_rev\" : \"778623431\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 4, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Matching an attribute which is a sub-document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a.j\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/780589511\", \n      \"_key\" : \"780589511\", \n      \"_rev\" : \"780589511\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/780261831\", \n      \"_key\" : \"780261831\", \n      \"_rev\" : \"780261831\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: Matching an attribute within a sub-document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  } \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/782555591\", \n      \"_key\" : \"782555591\", \n      \"_rev\" : \"782555591\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 1, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Simple query by-example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/first": { + "put": { + "description": "**A json post document with these Properties is required:**
  • count: the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
  • collection: the name of the collection
\n\n
This will return the first document(s) from the collection, in the order of insertion/update time. When the count argument is supplied, the result will be an array of documents, with the \"oldest\" document being first in the result array. If the count argument is not supplied, the result is the \"oldest\" document of the collection, or null if the collection is empty.
Note: this method is not supported for sharded collections with more than one shard.

Example: Retrieving the first n documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first <<EOF\n{ \n  \"collection\" : \"products\", \n  \"count\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/784193991\", \n      \"_key\" : \"784193991\", \n      \"_rev\" : \"784193991\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 1, \n        \"j\" : 1 \n      } \n    }, \n    { \n      \"_id\" : \"products/784521671\", \n      \"_key\" : \"784521671\", \n      \"_rev\" : \"784521671\", \n      \"i\" : 1, \n      \"a\" : { \n        \"j\" : 1 \n      } \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the first document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"_id\" : \"products/789633479\", \n    \"_key\" : \"789633479\", \n    \"_rev\" : \"789633479\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 1, \n      \"j\" : 1 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_first" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " First document of a collection", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/first-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • example: The example document.
  • collection: The name of the collection to query.
\n\n
This will return the first document matching a given example.
Returns a result containing the document or HTTP 404 if no document matched the example.
If more than one document in the collection matches the specified example, only one of these documents will be returned, and it is undefined which of the matching documents is returned.

Example: If a matching document was found

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"i\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"document\" : { \n    \"_id\" : \"products/786618823\", \n    \"_key\" : \"786618823\", \n    \"_rev\" : \"786618823\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 2, \n      \"j\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: If no document was found

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/first-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"l\" : 1 \n  } \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 404, \n  \"errorMessage\" : \"no match\" \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_first_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Find documents matching an example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/fulltext": { + "put": { + "description": "**A json post document with these Properties is required:**
  • index: The identifier of the fulltext-index to use.
  • attribute: The attribute that contains the texts.
  • collection: The name of the collection to query.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • skip: The number of documents to skip in the query (optional).
  • query: The fulltext query. Please refer to [Fulltext queries](../SimpleQueries/FulltextQueries.html) for details.
\n\n
This will find all documents from the collection that match the fulltext query specified in query.
In order to use the fulltext operator, a fulltext index must be defined for the collection and the specified attribute.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the fulltext simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the near operator is to issue an AQL query using the FULLTEXT [AQL function](../Aql/FulltextFunctions.md) as follows:

FOR doc IN FULLTEXT(@@collection, @attributeName, @queryString, @limit) RETURN doc

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/fulltext <<EOF\n{ \n  \"collection\" : \"products\", \n  \"attribute\" : \"text\", \n  \"query\" : \"word\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/791009735\", \n      \"_key\" : \"791009735\", \n      \"_rev\" : \"791009735\", \n      \"text\" : \"this text contains word\" \n    }, \n    { \n      \"_id\" : \"products/791206343\", \n      \"_key\" : \"791206343\", \n      \"_rev\" : \"791206343\", \n      \"text\" : \"this text also has a word\" \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_fulltext" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Fulltext index query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/last": { + "put": { + "description": "**A json post document with these Properties is required:**
  • count: the number of documents to return at most. Specifying count is optional. If it is not specified, it defaults to 1.
  • collection: the name of the collection
\n\n
This will return the last documents from the collection, in the order of insertion/update time. When the count argument is supplied, the result will be an array of documents, with the \"latest\" document being first in the result array.
If the count argument is not supplied, the result is the \"latest\" document of the collection, or null if the collection is empty.
Note: this method is not supported for sharded collections with more than one shard.

Example: Retrieving the last n documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/last <<EOF\n{ \n  \"collection\" : \"products\", \n  \"count\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/793369031\", \n      \"_key\" : \"793369031\", \n      \"_rev\" : \"793369031\", \n      \"i\" : 1, \n      \"a\" : { \n        \"k\" : 2, \n        \"j\" : 2 \n      } \n    }, \n    { \n      \"_id\" : \"products/793172423\", \n      \"_key\" : \"793172423\", \n      \"_rev\" : \"793172423\", \n      \"i\" : 1 \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Retrieving the last document

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/last <<EOF\n{ \n  \"collection\" : \"products\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"_id\" : \"products/795007431\", \n    \"_key\" : \"795007431\", \n    \"_rev\" : \"795007431\", \n    \"i\" : 1, \n    \"a\" : { \n      \"k\" : 2, \n      \"j\" : 2 \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_last" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned when the query was successfully executed.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Last document of a collection", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/lookup-by-keys": { + "put": { + "description": "**A json post document with these Properties is required:**
  • keys: array with the _keys of documents to look up. of type string
  • collection: The name of the collection to look in for the documents
\n\nLooks up the documents in the specified collection using the array of keys provided. All documents for which a matching key was specified in the keys array and that exist in the collection will be returned. Keys for which no document can be found in the underlying collection are ignored, and no exception will be thrown for them.
The body of the response contains a JSON object with a documents attribute. The documents attribute is an array containing the matching documents. The order in which matching documents are present in the result array is unspecified.

Example: Looking up existing documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"test0\", \n    \"test1\", \n    \"test2\", \n    \"test3\", \n    \"test4\", \n    \"test5\", \n    \"test6\", \n    \"test7\", \n    \"test8\", \n    \"test9\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ \n    { \n      \"value\" : 0, \n      \"_id\" : \"test/test0\", \n      \"_rev\" : \"795597255\", \n      \"_key\" : \"test0\" \n    }, \n    { \n      \"value\" : 1, \n      \"_id\" : \"test/test1\", \n      \"_rev\" : \"795793863\", \n      \"_key\" : \"test1\" \n    }, \n    { \n      \"value\" : 2, \n      \"_id\" : \"test/test2\", \n      \"_rev\" : \"795990471\", \n      \"_key\" : \"test2\" \n    }, \n    { \n      \"value\" : 3, \n      \"_id\" : \"test/test3\", \n      \"_rev\" : \"796187079\", \n      \"_key\" : \"test3\" \n    }, \n    { \n      \"value\" : 4, \n      \"_id\" : \"test/test4\", \n      \"_rev\" : \"796383687\", \n      \"_key\" : \"test4\" \n    }, \n    { \n      \"value\" : 5, \n      \"_id\" : \"test/test5\", \n      \"_rev\" : \"796580295\", \n      \"_key\" : \"test5\" \n    }, \n    { \n      \"value\" : 6, \n      \"_id\" : \"test/test6\", \n      \"_rev\" : \"796776903\", \n      \"_key\" : \"test6\" \n    }, \n    { \n      \"value\" : 7, \n      \"_id\" : \"test/test7\", \n      \"_rev\" : \"796973511\", \n      \"_key\" : \"test7\" \n    }, \n    { \n      \"value\" : 8, \n      \"_id\" : \"test/test8\", \n      \"_rev\" : \"797170119\", \n      \"_key\" : \"test8\" \n    }, \n    { \n      \"value\" : 9, \n      \"_id\" : \"test/test9\", \n      \"_rev\" : \"797366727\", \n      \"_key\" : \"test9\" \n    } \n  ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Looking up non-existing documents

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/lookup-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"foo\", \n    \"bar\", \n    \"baz\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"documents\" : [ ], \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/RestLookupByKeys" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the operation was carried out successfully.
" + }, + "404": { + "description": "is returned if the collection was not found. The response body contains an error document in this case.
" + }, + "405": { + "description": "is returned if the operation was called with a different HTTP METHOD than PUT.
" + } + }, + "summary": " Find documents by their keys", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/near": { + "put": { + "description": "**A json post document with these Properties is required:**
  • distance: If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
  • skip: The number of documents to skip in the query. (optional)
  • longitude: The longitude of the coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • collection: The name of the collection to query.
  • latitude: The latitude of the coordinate.
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
The default will find at most 100 documents near the given coordinate. The returned array is sorted according to the distance, with the nearest document being first in the return array. If there are near documents of equal distance, documents are chosen randomly from this set until the limit is reached.
In order to use the near operator, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.

Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the near simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the near operator is to issue an [AQL query](../Aql/GeoFunctions.md) using the NEAR function as follows:

FOR doc IN NEAR(@@collection, @latitude, @longitude, @limit) RETURN doc

Example: Without distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/801823175\", \n      \"_key\" : \"801823175\", \n      \"_rev\" : \"801823175\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/801429959\", \n      \"_key\" : \"801429959\", \n      \"_rev\" : \"801429959\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: With distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 3, \n  \"distance\" : \"distance\" \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/804444615\", \n      \"_key\" : \"804444615\", \n      \"_rev\" : \"804444615\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/804837831\", \n      \"_key\" : \"804837831\", \n      \"_rev\" : \"804837831\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/804248007\", \n      \"_key\" : \"804248007\", \n      \"_rev\" : \"804248007\", \n      \"name\" : \"Name/-0.004/\", \n      \"loc\" : [ \n        -0.004, \n        0 \n      ], \n      \"distance\" : 444.779706578235 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 3, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_near" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Returns documents near a coordinate", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/range": { + "put": { + "description": "**A json post document with these Properties is required:**
  • right: The upper bound.
  • attribute: The attribute path to check.
  • collection: The name of the collection to query.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. (optional)
  • closed: If true, use interval including left and right, otherwise exclude right, but include left.
  • skip: The number of documents to skip in the query (optional).
  • left: The lower bound.
\n\n
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the range simple query is deprecated as of ArangoDB 2.6. The function may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection within a specific range is to use an AQL query as follows:

FOR doc IN @@collection FILTER doc.value >= @left && doc.value < @right LIMIT @skip, @limit RETURN doc

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/range <<EOF\n{ \n  \"collection\" : \"products\", \n  \"attribute\" : \"i\", \n  \"left\" : 2, \n  \"right\" : 4 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/806738375\", \n      \"_key\" : \"806738375\", \n      \"_rev\" : \"806738375\", \n      \"i\" : 2 \n    }, \n    { \n      \"_id\" : \"products/806934983\", \n      \"_key\" : \"806934983\", \n      \"_rev\" : \"806934983\", \n      \"i\" : 3 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_range" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown or no suitable index for the range query is present. The response body contains an error document in this case.
" + } + }, + "summary": " Simple range query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/remove-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to remove from.
  • options: a JSON object which can contain the following attributes:
    • limit: an optional value that determines how many documents to delete at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be deleted.
    • waitForSync: if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
\n\n
This will find all documents in the collection that match the specified example object.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were deleted.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using Parameter: waitForSync and limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"waitForSync\" : true, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using Parameter: waitForSync and limit with new signature

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"options\" : { \n    \"waitForSync\" : true, \n    \"limit\" : 2 \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"deleted\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_remove_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Remove documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/remove-by-keys": { + "put": { + "description": "**A json post document with these Properties is required:**
  • keys: array with the _keys of documents to remove. of type string
  • options: a json object which can contain the following attributes:
    • waitForSync: if set to true, then all removal operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • collection: The name of the collection to look in for the documents to remove
\n\nLooks up the documents in the specified collection using the array of keys provided, and removes all documents from the collection whose keys are contained in the keys array. Keys for which no document can be found in the underlying collection are ignored, and no exception will be thrown for them.
The body of the response contains a JSON object with information how many documents were removed (and how many were not). The removed attribute will contain the number of actually removed documents. The ignored attribute will contain the number of keys in the request for which no matching document could be found.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"test0\", \n    \"test1\", \n    \"test2\", \n    \"test3\", \n    \"test4\", \n    \"test5\", \n    \"test6\", \n    \"test7\", \n    \"test8\", \n    \"test9\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"removed\" : 10, \n  \"ignored\" : 0, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/remove-by-keys <<EOF\n{ \n  \"keys\" : [ \n    \"foo\", \n    \"bar\", \n    \"baz\" \n  ], \n  \"collection\" : \"test\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"removed\" : 0, \n  \"ignored\" : 3, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/RestRemoveByKeys" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the operation was carried out successfully. The number of removed documents may still be 0 in this case if none of the specified document keys were found in the collection.
" + }, + "404": { + "description": "is returned if the collection was not found. The response body contains an error document in this case.
" + }, + "405": { + "description": "is returned if the operation was called with a different HTTP METHOD than PUT.
" + } + }, + "summary": " Remove documents by their keys", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/replace-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • options: a json object which can contain the following attributes
    • limit: an optional value that determines how many documents to replace at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be replaced.

    • waitForSync: if set to true, then all replacement operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to replace within.
  • newValue: The replacement document that will get inserted in place of the \"old\" documents.
\n\n
This will find all documents in the collection that match the specified example object, and replace the entire document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were replaced.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"foo\" : \"bar\" \n  }, \n  \"limit\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"replaced\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using new Signature for attributes WaitForSync and limit

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/replace-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"foo\" : \"bar\" \n  }, \n  \"options\" : { \n    \"limit\" : 3, \n    \"waitForSync\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"replaced\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_replace_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Replace documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/update-by-example": { + "put": { + "description": "**A json post document with these Properties is required:**
  • options: a json object which can contain the following attributes:
    • keepNull: This parameter can be used to modify the behavior when handling null values. Normally, null values are stored in the database. By setting the keepNull parameter to false, this behavior can be changed so that all attributes in data with null values will be removed from the updated document.
    • limit: an optional value that determines how many documents to update at most. If limit is specified but is less than the number of documents in the collection, it is undefined which of the documents will be updated.
    • waitForSync: if set to true, then all update operations will instantly be synchronized to disk. If this is not specified, then the collection's default sync behavior will be applied.
  • example: An example document that all collection documents are compared against.
  • collection: The name of the collection to update within.
  • newValue: A document containing all the attributes to update in the found documents.
\n\n
This will find all documents in the collection that match the specified example object, and partially update the document body with the new value specified. Note that document meta-attributes such as _id, _key, _from, _to etc. cannot be replaced.
Note: the limit attribute is not supported on sharded collections. Using it will result in an error.
Returns the number of documents that were updated.


Example: using old syntax for options

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"a\" : { \n      \"j\" : 22 \n    } \n  }, \n  \"limit\" : 3 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"updated\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: using new signature for options

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/update-by-example <<EOF\n{ \n  \"collection\" : \"products\", \n  \"example\" : { \n    \"a\" : { \n      \"j\" : 1 \n    } \n  }, \n  \"newValue\" : { \n    \"a\" : { \n      \"j\" : 22 \n    } \n  }, \n  \"options\" : { \n    \"limit\" : 3, \n    \"waitForSync\" : true \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"updated\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_update_by_example" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "is returned if the collection was updated successfully and waitForSync was true.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Update documents by example", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/within": { + "put": { + "description": "**A json post document with these Properties is required:**
  • distance: If given, the attribute key used to return the distance to the given coordinate. (optional). If specified, distances are returned in meters.
  • skip: The number of documents to skip in the query. (optional)
  • longitude: The longitude of the coordinate.
  • radius: The maximal radius (in meters).
  • collection: The name of the collection to query.
  • latitude: The latitude of the coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
This will find all documents within a given radius around the coordinate (latitude, longitude). The returned list is sorted by distance.
In order to use the within operator, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.

Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.
Note: the within simple query is deprecated as of ArangoDB 2.6. This API may be removed in future versions of ArangoDB. The preferred way for retrieving documents from a collection using the within operator is to issue an [AQL query](../Aql/GeoFunctions.md) using the WITHIN function as follows:

FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius, @distanceAttributeName) RETURN doc

Example: Without distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 2, \n  \"radius\" : 500 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/829610439\", \n      \"_key\" : \"829610439\", \n      \"_rev\" : \"829610439\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/829217223\", \n      \"_key\" : \"829217223\", \n      \"_rev\" : \"829217223\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
Example: With distance

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/near <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude\" : 0, \n  \"longitude\" : 0, \n  \"skip\" : 1, \n  \"limit\" : 3, \n  \"distance\" : \"distance\", \n  \"radius\" : 300 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/832231879\", \n      \"_key\" : \"832231879\", \n      \"_rev\" : \"832231879\", \n      \"name\" : \"Name/-0.002/\", \n      \"loc\" : [ \n        -0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/832625095\", \n      \"_key\" : \"832625095\", \n      \"_rev\" : \"832625095\", \n      \"name\" : \"Name/0.002/\", \n      \"loc\" : [ \n        0.002, \n        0 \n      ], \n      \"distance\" : 222.38985328911744 \n    }, \n    { \n      \"_id\" : \"products/832035271\", \n      \"_key\" : \"832035271\", \n      \"_rev\" : \"832035271\", \n      \"name\" : \"Name/-0.004/\", \n      \"loc\" : [ \n        -0.004, \n        0 \n      ], \n      \"distance\" : 444.779706578235 \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 3, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_within" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Find documents within a radius around a coordinate", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/simple/within-rectangle": { + "put": { + "description": "**A json post document with these Properties is required:**
  • latitude1: The latitude of the first rectangle coordinate.
  • skip: The number of documents to skip in the query. (optional)
  • latitude2: The latitude of the second rectangle coordinate.
  • longitude2: The longitude of the second rectangle coordinate.
  • longitude1: The longitude of the first rectangle coordinate.
  • limit: The maximal amount of documents to return. The skip is applied before the limit restriction. The default is 100. (optional)
  • collection: The name of the collection to query.
  • geo: If given, the identifier of the geo-index to use. (optional)
\n\n
This will find all documents within the specified rectangle (determined by the given coordinates latitude1, longitude1, latitude2, longitude2).
In order to use the within-rectangle query, a geo index must be defined for the collection. This index also defines which attribute holds the coordinates for the document. If you have more than one geo-spatial index, you can use the geo field to select a particular index.
Returns a cursor containing the result, see [Http Cursor](../HttpAqlQueryCursor/README.md) for details.

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/simple/within-rectangle <<EOF\n{ \n  \"collection\" : \"products\", \n  \"latitude1\" : 0, \n  \"longitude1\" : 0, \n  \"latitude2\" : 0.2, \n  \"longitude2\" : 0.2, \n  \"skip\" : 1, \n  \"limit\" : 2 \n}\nEOF\n\nHTTP/1.1 201 Created\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : [ \n    { \n      \"_id\" : \"products/836229575\", \n      \"_key\" : \"836229575\", \n      \"_rev\" : \"836229575\", \n      \"name\" : \"Name/0.008/\", \n      \"loc\" : [ \n        0.008, \n        0 \n      ] \n    }, \n    { \n      \"_id\" : \"products/836032967\", \n      \"_key\" : \"836032967\", \n      \"_rev\" : \"836032967\", \n      \"name\" : \"Name/0.006/\", \n      \"loc\" : [ \n        0.006, \n        0 \n      ] \n    } \n  ], \n  \"hasMore\" : false, \n  \"count\" : 2, \n  \"error\" : false, \n  \"code\" : 201 \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSA_put_api_simple_within_rectangle" + }, + "x-description-offset": 59 + } + ], + "responses": { + "201": { + "description": "is returned if the query was executed successfully.
" + }, + "400": { + "description": "is returned if the body does not contain a valid JSON representation of a query. The response body contains an error document in this case.
" + }, + "404": { + "description": "is returned if the collection specified by collection is unknown. The response body contains an error document in this case.
" + } + }, + "summary": " Within rectangle query", + "tags": [ + "Simple Queries" + ], + "x-examples": [], + "x-filename": "Simple Queries - js/actions/api-simple.js, arangod/RestHandler/RestSimpleHandler.cpp, arangod/RestHandler/RestSimpleQueryHandler.cpp" + } + }, + "/_api/tasks": { + "post": { + "description": "**A json post document with these Properties is required:**
  • params: The parameters to be passed into command
  • offset: Number of seconds of initial delay
  • command: The JavaScript code to be executed
  • name: The name of the task
  • period: number of seconds between the executions
\n\ncreates a new task with a generated id

Example:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/tasks/ <<EOF\n{ \n  \"name\" : \"SampleTask\", \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"params\" : { \n    \"foo\" : \"bar\", \n    \"bar\" : \"foo\" \n  }, \n  \"period\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"836884935\", \n  \"name\" : \"SampleTask\", \n  \"type\" : \"periodic\", \n  \"period\" : 2, \n  \"created\" : 1443627622.01888, \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\nshell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/836884935\n\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_new_tasks" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the post body is not accurate, a HTTP 400 is returned.
" + } + }, + "summary": " creates a task", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/tasks/": { + "get": { + "description": "\n\nfetches all existing tasks on the server

Example: Fetching all tasks

shell> curl --dump - http://localhost:8529/_api/tasks\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n[ \n  { \n    \"id\" : \"16898503\", \n    \"name\" : \"user-defined task\", \n    \"type\" : \"periodic\", \n    \"period\" : 1, \n    \"created\" : 1443627553.436199, \n    \"command\" : \"(function () {\\n      require('org/arangodb/foxx/queues/manager').manage();\\n    })(params)\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-gc\", \n    \"name\" : \"statistics-gc\", \n    \"type\" : \"periodic\", \n    \"period\" : 450, \n    \"created\" : 1443627552.94918, \n    \"command\" : \"require('org/arangodb/statistics').garbageCollector();\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-average-collector\", \n    \"name\" : \"statistics-average-collector\", \n    \"type\" : \"periodic\", \n    \"period\" : 900, \n    \"created\" : 1443627552.946052, \n    \"command\" : \"require('org/arangodb/statistics').historianAverage();\", \n    \"database\" : \"_system\" \n  }, \n  { \n    \"id\" : \"statistics-collector\", \n    \"name\" : \"statistics-collector\", \n    \"type\" : \"periodic\", \n    \"period\" : 10, \n    \"created\" : 1443627552.945114, \n    \"command\" : \"require('org/arangodb/statistics').historian();\", \n    \"database\" : \"_system\" \n  } \n]\n

\n
", + "parameters": [], + "responses": { + "200": { + "description": "The list of tasks
" + } + }, + "summary": " Fetch all tasks or one task", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/tasks/{id}": { + "delete": { + "description": "\n\nDeletes the task identified by id on the server.

Example: trying to delete non existing task

shell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/NoTaskWithThatName\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1852, \n  \"errorMessage\" : \"task not found\" \n}\n

\n
Example: Remove existing Task

shell> curl -X DELETE --dump - http://localhost:8529/_api/tasks/SampleTask\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to delete.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "404": { + "description": "If the task id is unknown, then an HTTP 404 is returned.
" + } + }, + "summary": " deletes the task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "get": { + "description": "\n\nfetches one existing tasks on the server specified by id

Example: Fetching a single task by its id

shell> curl --dump - http://localhost:8529/_api/tasks/statistics-average-collector\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"statistics-average-collector\", \n  \"name\" : \"statistics-average-collector\", \n  \"type\" : \"periodic\", \n  \"period\" : 900, \n  \"created\" : 1443627552.946052, \n  \"command\" : \"require('org/arangodb/statistics').historianAverage();\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: trying to fetch a non-existing task

shell> curl --dump - http://localhost:8529/_api/tasks/non-existing-task\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1852, \n  \"errorMessage\" : \"task not found\" \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to fetch.
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The requested task
" + } + }, + "summary": " Fetch one task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + }, + "put": { + "description": "**A json post document with these Properties is required:**
  • params: The parameters to be passed into command
  • offset: Number of seconds of initial delay
  • command: The JavaScript code to be executed
  • name: The name of the task
  • period: number of seconds between the executions
\n\nregisters a new task with the specified id

Example:

shell> curl -X PUT --data-binary @- --dump - http://localhost:8529/_api/tasks/sampleTask <<EOF\n{ \n  \"id\" : \"SampleTask\", \n  \"name\" : \"SampleTask\", \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"params\" : { \n    \"foo\" : \"bar\", \n    \"bar\" : \"foo\" \n  }, \n  \"period\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"id\" : \"sampleTask\", \n  \"name\" : \"SampleTask\", \n  \"type\" : \"periodic\", \n  \"period\" : 2, \n  \"created\" : 1443627622.623117, \n  \"command\" : \"(function(params) { require('internal').print(params); })(params)\", \n  \"database\" : \"_system\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
", + "parameters": [ + { + "description": "The id of the task to create
", + "format": "string", + "in": "path", + "name": "id", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_put_api_new_tasks" + }, + "x-description-offset": 59 + } + ], + "responses": { + "400": { + "description": "If the task id already exists or the rest body is not accurate, HTTP 400 is returned.
" + } + }, + "summary": " creates a task with id", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + }, + "/_api/transaction": { + "post": { + "description": "**A json post document with these Properties is required:**
  • action: the actual transaction operations to be executed, in the form of stringified JavaScript code. The code will be executed on server side, with late binding. It is thus critical that the code specified in action properly sets up all the variables it needs. If the code specified in action ends with a return statement, the value returned will also be returned by the REST API in the result attribute if the transaction committed successfully.
  • params: optional arguments passed to action.
  • collections: contains the array of collections to be used in the transaction (mandatory). collections must be a JSON object that can have the optional sub-attributes read and write. read and write must each be either arrays of collections names or strings with a single collection name.
  • lockTimeout: an optional numeric value that can be used to set a timeout for waiting on collection locks. If not specified, a default value will be used. Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
  • waitForSync: an optional boolean flag that, if set, will force the transaction to write all data to disk before returning.
\n\n
Contains the collections and action.
The transaction description must be passed in the body of the POST request.
If the transaction is fully executed and committed on the server, HTTP 200 will be returned. Additionally, the return value of the code defined in action will be returned in the result attribute.
For successfully committed transactions, the returned JSON object has the following properties:
  • error: boolean flag to indicate if an error occurred (false in this case)
  • code: the HTTP status code
  • result: the return value of the transaction
If the transaction specification is either missing or malformed, the server will respond with HTTP 400.
The body of the response will then contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message
If a transaction fails to commit, either by an exception thrown in the action code, or by an internal error, the server will respond with an error. Any other errors will be returned with any of the return codes HTTP 400, HTTP 409, or HTTP 500.

Example: Executing a transaction on a single collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : \"products\" \n  }, \n  \"action\" : \"function () { var db = require('internal').db; db.products.save({});  return db.products.count(); }\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : 1, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Executing a transaction using multiple collections

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : [ \n      \"products\", \n      \"materials\" \n    ] \n  }, \n  \"action\" : \"function () {var db = require('internal').db;db.products.save({});db.materials.save({});return 'worked!';}\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : \"worked!\", \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Aborting a transaction due to an internal error

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"write\" : \"products\" \n  }, \n  \"action\" : \"function () {var db = require('internal').db;db.products.save({ _key: 'abc'});db.products.save({ _key: 'abc'});}\" \n}\nEOF\n\nHTTP/1.1 400 Bad Request\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"[ArangoError 1210: unique constraint violated]\", \n  \"stacktrace\" : [ \n    \"[ArangoError 1210: unique constraint violated]\", \n    \"  at Error (native)\", \n    \"  at eval (<anonymous>:1:99)\", \n    \"  at eval (<anonymous>:1:122)\", \n    \"  at post_api_transaction (js/actions/api-transaction.js:268:16)\", \n    \"  at Function.actions.defineHttp.callback (js/actions/api-transaction.js:288:11)\" \n  ], \n  \"message\" : \"unique constraint violated\", \n  \"error\" : true, \n  \"code\" : 400, \n  \"errorNum\" : 1210, \n  \"errorMessage\" : \"unique constraint violated\" \n}\n

\n
Example: Aborting a transaction by explicitly throwing an exception

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"read\" : \"products\" \n  }, \n  \"action\" : \"function () { throw 'doh!'; }\" \n}\nEOF\n\nHTTP/1.1 500 Internal Server Error\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"doh!\", \n  \"error\" : true, \n  \"code\" : 500, \n  \"errorNum\" : 500, \n  \"errorMessage\" : \"internal server error\" \n}\n

\n
Example: Referring to a non-existing collection

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/transaction <<EOF\n{ \n  \"collections\" : { \n    \"read\" : \"products\" \n  }, \n  \"action\" : \"function () { return true; }\" \n}\nEOF\n\nHTTP/1.1 404 Not Found\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"exception\" : \"[ArangoError 1203: collection not found]\", \n  \"stacktrace\" : [ \n    \"[ArangoError 1203: collection not found]\", \n    \"  at Error (native)\", \n    \"  at post_api_transaction (js/actions/api-transaction.js:268:16)\", \n    \"  at Function.actions.defineHttp.callback (js/actions/api-transaction.js:288:11)\" \n  ], \n  \"message\" : \"collection not found\", \n  \"error\" : true, \n  \"code\" : 404, \n  \"errorNum\" : 1203, \n  \"errorMessage\" : \"collection not found\" \n}\n

\n
", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_post_api_transaction" + }, + "x-description-offset": 59 + } + ], + "responses": { + "200": { + "description": "If the transaction is fully executed and committed on the server, HTTP 200 will be returned.
" + }, + "400": { + "description": "If the transaction specification is either missing or malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "If the transaction specification contains an unknown collection, the server will respond with HTTP 404.
" + }, + "500": { + "description": "Exceptions thrown by users will make the server respond with a return code of HTTP 500
" + } + }, + "summary": " Execute transaction", + "tags": [ + "Transactions" + ], + "x-examples": [], + "x-filename": "Transactions - js/actions/api-transaction.js" + } + }, + "/_api/traversal": { + "post": { + "description": "\n\nStarts a traversal starting from a given vertex and following. edges contained in a given edgeCollection. The request must contain the following attributes.
**A JSON post document with these properties is required:**
  • sort: body (JavaScript) code of a custom comparison function for the edges. The signature of this function is (l, r) -> integer (where l and r are edges) and must return -1 if l is smaller than, +1 if l is greater than, and 0 if l and r are equal. The reason for this is the following: The order of edges returned for a certain vertex is undefined. This is because there is no natural order of edges for a vertex with multiple connected edges. To explicitly define the order in which edges on the vertex are followed, you can specify an edge comparator function with this attribute. Note that the value here has to be a string to conform to the JSON standard, which in turn is parsed as function body on the server side. Furthermore note that this attribute is only used for the standard expanders. If you use your custom expander you have to do the sorting yourself within the expander code.
  • direction: direction for traversal
    • if set, must be either \"outbound\", \"inbound\", or \"any\"
    • if not set, the expander attribute must be specified
  • minDepth: (ANDed with any existing filters): visits only nodes in at least the given depth
  • startVertex: id of the startVertex, e.g. \"users/foo\".
  • visitor: body (JavaScript) code of a custom visitor function. Function signature: (config, result, vertex, path, connected) -> void. The visitor function can do anything, but its return value is ignored. To populate a result, use the result variable by reference. Note that the connected argument is only populated when the order attribute is set to \"preorder-expander\".
  • itemOrder: item iteration order can be \"forward\" or \"backward\"
  • strategy: traversal strategy can be \"depthfirst\" or \"breadthfirst\"
  • filter: default is to include all nodes: body (JavaScript code) of a custom filter function. Function signature: (config, vertex, path) -> mixed. It can return four different string values:
    • \"exclude\" -> this vertex will not be visited.
    • \"prune\" -> the edges of this vertex will not be followed.
    • \"\" or undefined -> visit the vertex and follow its edges.
    • Array -> containing any combination of the above. If at least one \"exclude\" or \"prune\" is contained, its respective effect will occur.
  • init: body (JavaScript) code of a custom result initialization function. Function signature: (config, result) -> void. Initialize any values in result with what is required.
  • maxIterations: Maximum number of iterations in each traversal. This number can be set to prevent endless loops in traversal of cyclic graphs. When a traversal performs as many iterations as the maxIterations value, the traversal will abort with an error. If maxIterations is not set, a server-defined value may be used.
  • maxDepth: (ANDed with any existing filters): visits only nodes in at most the given depth
  • uniqueness: specifies uniqueness for vertices and edges visited if set, must be an object like this:
    \"uniqueness\": {\"vertices\": \"none\"|\"global\"|\"path\", \"edges\": \"none\"|\"global\"|\"path\"}
  • order: traversal order can be \"preorder\", \"postorder\" or \"preorder-expander\"
  • graphName: name of the graph that contains the edges. Either edgeCollection or graphName has to be given. In case both values are set the graphName is preferred.
  • expander: body (JavaScript) code of a custom expander function; must be set if the direction attribute is not set. Function signature: (config, vertex, path) -> array. The expander must return an array of the connections for vertex; each connection is an object with the attributes edge and vertex.
  • edgeCollection: name of the collection that contains the edges.
\n\n
If the Traversal is successfully executed HTTP 200 will be returned. Additionally the result object will be returned by the traversal.
For successful traversals, the returned JSON object has the following properties:
  • error: boolean flag to indicate if an error occurred (false in this case)
  • code: the HTTP status code
  • result: the return value of the traversal
If the traversal specification is either missing or malformed, the server will respond with HTTP 400.
The body of the response will then contain a JSON object with additional error details. The object has the following attributes:
  • error: boolean flag to indicate that an error occurred (true in this case)
  • code: the HTTP status code
  • errorNum: the server error number
  • errorMessage: a descriptive error message

Example: In the following examples the underlying graph will contain five persons Alice, Bob, Charlie, Dave and Eve. We will have the following directed relations: - Alice knows Bob - Bob knows Charlie - Bob knows Dave - Eve knows Alice - Eve knows Bob
The starting vertex will always be Alice.
Follow only outbound edges


shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"890100167\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"890296775\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"890558919\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"890755527\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              
\"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/891410887\", \n              \"_key\" : \"891410887\", \n              \"_rev\" : \"891410887\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"890558919\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/891214279\", \n              \"_key\" : \"891214279\", \n              \"_rev\" : \"891214279\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/891607495\", \n              \"_key\" : \"891607495\", \n              \"_rev\" : \"891607495\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n         
     \"_rev\" : \"890100167\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"890296775\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"890755527\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow only inbound edges

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"inbound\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"871619015\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"872470983\", \n          \"name\" : \"Eve\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"871619015\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/873322951\", \n              \"_key\" : \"873322951\", \n              \"_rev\" : \"873322951\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"871619015\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"872470983\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow any direction of edges

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"global\" \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"841537991\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"842389959\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"841734599\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"841537991\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"841996743\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"842193351\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n         
   }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/842652103\", \n              \"_key\" : \"842652103\", \n              \"_rev\" : \"842652103\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/842848711\", \n              \"_key\" : \"842848711\", \n    
          \"_rev\" : \"842848711\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"841996743\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/843241927\", \n              \"_key\" : \"843241927\", \n              \"_rev\" : \"843241927\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/843438535\", \n              \"_key\" : \"843438535\", \n              \"_rev\" : \"843438535\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/843045319\", \n              \"_key\" : \"843045319\", \n              \"_rev\" : \"843045319\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"841537991\", \n              \"name\" : 
\"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"842389959\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"841734599\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"842193351\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Excluding Charlie and Bob

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"filter\" : \"if (vertex.name === \\\"Bob\\\" ||     vertex.name === \\\"Charlie\\\") {  return \\\"exclude\\\";}return;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"863427015\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"864082375\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"863427015\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/864541127\", \n              \"_key\" : \"864541127\", \n              \"_rev\" : \"864541127\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/864934343\", \n              \"_key\" : \"864934343\", \n              \"_rev\" : \"864934343\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"863427015\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"863623623\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"864082375\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Do not follow edges from Bob

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"filter\" : \"if (vertex.name === \\\"Bob\\\") {return \\\"prune\\\";}return;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"867686855\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"867883463\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"867686855\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/868800967\", \n              \"_key\" : \"868800967\", \n              \"_rev\" : \"868800967\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"867686855\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"867883463\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Visit only nodes in a depth of at least 2

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"minDepth\" : 2 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"886299079\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"886495687\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/886954439\", \n              \"_key\" : \"886954439\", \n              \"_rev\" : \"886954439\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/887151047\", \n              \"_key\" : \"887151047\", \n              \"_rev\" : \"887151047\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"885840327\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"886036935\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"886299079\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            
{ \n              \"_id\" : \"knows/886954439\", \n              \"_key\" : \"886954439\", \n              \"_rev\" : \"886954439\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/887347655\", \n              \"_key\" : \"887347655\", \n              \"_rev\" : \"887347655\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"885840327\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"886036935\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"886495687\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Visit only nodes in a depth of at most 1

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"maxDepth\" : 1 \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"875616711\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"875813319\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"875616711\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/876730823\", \n              \"_key\" : \"876730823\", \n              \"_rev\" : \"876730823\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"875616711\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"875813319\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using a visitor function to return vertex ids only

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"visitor\" : \"result.visited.vertices.push(vertex._id);\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        \"persons/alice\", \n        \"persons/bob\", \n        \"persons/charlie\", \n        \"persons/dave\" \n      ], \n      \"paths\" : [ ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Count all visited vertices and return a list of vertices only

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"outbound\", \n  \"init\" : \"result.visited = 0; result.myVertices = [ ];\", \n  \"visitor\" : \"result.visited++; result.myVertices.push(vertex);\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : 4, \n    \"myVertices\" : [ \n      { \n        \"_id\" : \"persons/alice\", \n        \"_key\" : \"alice\", \n        \"_rev\" : \"900323783\", \n        \"name\" : \"Alice\" \n      }, \n      { \n        \"_id\" : \"persons/bob\", \n        \"_key\" : \"bob\", \n        \"_rev\" : \"900520391\", \n        \"name\" : \"Bob\" \n      }, \n      { \n        \"_id\" : \"persons/charlie\", \n        \"_key\" : \"charlie\", \n        \"_rev\" : \"900782535\", \n        \"name\" : \"Charlie\" \n      }, \n      { \n        \"_id\" : \"persons/dave\", \n        \"_key\" : \"dave\", \n        \"_rev\" : \"900979143\", \n        \"name\" : \"Dave\" \n      } \n    ] \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Expand only inbound edges of Alice and outbound edges of Eve

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"expander\" : \"var connections = [ ];if (vertex.name === \\\"Alice\\\") {config.datasource.getInEdges(vertex).forEach(function (e) {connections.push({ vertex: require(\\\"internal\\\").db._document(e._from), edge: e});});}if (vertex.name === \\\"Eve\\\") {config.datasource.getOutEdges(vertex).forEach(function (e) {connections.push({vertex: require(\\\"internal\\\").db._document(e._to), edge: e});});}return connections;\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"904583623\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"905435591\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"904780231\", \n          \"name\" : \"Bob\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/906287559\", \n              \"_key\" : \"906287559\", \n              \"_rev\" : \"906287559\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : 
\"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"905435591\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/906287559\", \n              \"_key\" : \"906287559\", \n              \"_rev\" : \"906287559\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/906484167\", \n              \"_key\" : \"906484167\", \n              \"_rev\" : \"906484167\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"904583623\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"905435591\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"904780231\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Follow the "depthfirst" traversal strategy

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"strategy\" : \"depthfirst\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"853334471\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"852679111\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"852941255\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"853137863\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"852679111\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"853334471\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"852482503\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          
\"_rev\" : \"852941255\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"853137863\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n     
         \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : 
\"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853793223\", \n              \"_key\" : \"853793223\", \n              \"_rev\" : \"853793223\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"852941255\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : \"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853989831\", \n              \"_key\" : \"853989831\", \n              \"_rev\" : \"853989831\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"853137863\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : 
\"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854383047\", \n              \"_key\" : \"854383047\", \n              \"_rev\" : \"854383047\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/854186439\", \n              \"_key\" : 
\"854186439\", \n              \"_rev\" : \"854186439\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"853334471\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853793223\", \n              \"_key\" : \"853793223\", \n              \"_rev\" : \"853793223\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n         
     \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"852941255\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/853596615\", \n              \"_key\" : \"853596615\", \n              \"_rev\" : \"853596615\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/853989831\", \n              \"_key\" : \"853989831\", \n              \"_rev\" : \"853989831\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"852482503\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"852679111\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"853137863\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using "postorder" ordering

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"order\" : \"postorder\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"894818759\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"895015367\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"894556615\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"895211975\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"895211975\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"894818759\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"895015367\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          
\"_rev\" : \"894556615\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"894360007\", \n          \"name\" : \"Alice\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : 
\"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895670727\", \n              \"_key\" : \"895670727\", \n              \"_rev\" : \"895670727\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"894818759\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", 
\n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895867335\", \n              \"_key\" : \"895867335\", \n              \"_rev\" : \"895867335\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"895015367\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n            
  \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896063943\", \n              \"_key\" : \"896063943\", \n              \"_rev\" : \"896063943\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : 
\"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/896260551\", \n              \"_key\" : \"896260551\", \n              \"_rev\" : \"896260551\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"895211975\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n        
      \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895670727\", \n              \"_key\" : \"895670727\", \n              \"_rev\" : \"895670727\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"894818759\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/895867335\", \n              \"_key\" : \"895867335\", \n              \"_rev\" : \"895867335\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"895015367\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/895474119\", \n              \"_key\" : \"895474119\", \n              \"_rev\" : \"895474119\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"894556615\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"894360007\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Using backward item-ordering:

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"itemOrder\" : \"backward\" \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"846715335\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"847174087\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"846977479\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"847370695\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"847370695\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"846715335\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"847174087\", \n          \"name\" : \"Dave\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          
\"_rev\" : \"846977479\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"846518727\", \n          \"name\" : \"Alice\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848026055\", \n              \"_key\" : \"848026055\", \n              \"_rev\" : \"848026055\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n 
             \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"847174087\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847829447\", \n              \"_key\" : \"847829447\", \n              \"_rev\" : \"847829447\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"846977479\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n       
     { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              
\"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n    
          \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/848026055\", \n              \"_key\" : \"848026055\", \n              \"_rev\" : \"848026055\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : 
\"847174087\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847829447\", \n              \"_key\" : \"847829447\", \n              \"_rev\" : \"847829447\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"846977479\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/848222663\", \n              \"_key\" : \"848222663\", \n              \"_rev\" : \"848222663\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : 
\"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/848419271\", \n              \"_key\" : \"848419271\", \n              \"_rev\" : \"848419271\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/847632839\", \n              \"_key\" : \"847632839\", \n              \"_rev\" : \"847632839\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"847370695\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"846715335\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"846518727\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: Edges should only be included once globally, but nodes are included every time they are visited

shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"global\" \n  } \n}\nEOF\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"result\" : { \n    \"visited\" : { \n      \"vertices\" : [ \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"858446279\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/eve\", \n          \"_key\" : \"eve\", \n          \"_rev\" : \"859298247\", \n          \"name\" : \"Eve\" \n        }, \n        { \n          \"_id\" : \"persons/bob\", \n          \"_key\" : \"bob\", \n          \"_rev\" : \"858642887\", \n          \"name\" : \"Bob\" \n        }, \n        { \n          \"_id\" : \"persons/alice\", \n          \"_key\" : \"alice\", \n          \"_rev\" : \"858446279\", \n          \"name\" : \"Alice\" \n        }, \n        { \n          \"_id\" : \"persons/charlie\", \n          \"_key\" : \"charlie\", \n          \"_rev\" : \"858905031\", \n          \"name\" : \"Charlie\" \n        }, \n        { \n          \"_id\" : \"persons/dave\", \n          \"_key\" : \"dave\", \n          \"_rev\" : \"859101639\", \n          \"name\" : \"Dave\" \n        } \n      ], \n      \"paths\" : [ \n        { \n          \"edges\" : [ ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : 
\"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n         
   }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859560391\", \n              \"_key\" : \"859560391\", \n              \"_rev\" : \"859560391\", \n              \"_from\" : \"persons/alice\", \n              \"_to\" : \"persons/bob\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859756999\", \n              \"_key\" : \"859756999\", \n    
          \"_rev\" : \"859756999\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/charlie\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : \"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/charlie\", \n              \"_key\" : \"charlie\", \n              \"_rev\" : \"858905031\", \n              \"name\" : \"Charlie\" \n            } \n          ] \n        }, \n        { \n          \"edges\" : [ \n            { \n              \"_id\" : \"knows/860150215\", \n              \"_key\" : \"860150215\", \n              \"_rev\" : \"860150215\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/alice\" \n            }, \n            { \n              \"_id\" : \"knows/860346823\", \n              \"_key\" : \"860346823\", \n              \"_rev\" : \"860346823\", \n              \"_from\" : \"persons/eve\", \n              \"_to\" : \"persons/bob\" \n            }, \n            { \n              \"_id\" : \"knows/859953607\", \n              \"_key\" : \"859953607\", \n              \"_rev\" : \"859953607\", \n              \"_from\" : \"persons/bob\", \n              \"_to\" : \"persons/dave\" \n            } \n          ], \n          \"vertices\" : [ \n            { \n              \"_id\" : \"persons/alice\", \n              \"_key\" : \"alice\", \n              \"_rev\" : \"858446279\", \n              \"name\" : 
\"Alice\" \n            }, \n            { \n              \"_id\" : \"persons/eve\", \n              \"_key\" : \"eve\", \n              \"_rev\" : \"859298247\", \n              \"name\" : \"Eve\" \n            }, \n            { \n              \"_id\" : \"persons/bob\", \n              \"_key\" : \"bob\", \n              \"_rev\" : \"858642887\", \n              \"name\" : \"Bob\" \n            }, \n            { \n              \"_id\" : \"persons/dave\", \n              \"_key\" : \"dave\", \n              \"_rev\" : \"859101639\", \n              \"name\" : \"Dave\" \n            } \n          ] \n        } \n      ] \n    } \n  }, \n  \"error\" : false, \n  \"code\" : 200 \n}\n

\n
Example: If the underlying graph is cyclic, maxIterations should be set
The underlying graph has two vertices Alice and Bob. With the directed edges:
  • Alice knows Bob
  • Bob knows Alice


shell> curl -X POST --data-binary @- --dump - http://localhost:8529/_api/traversal <<EOF\n{ \n  \"startVertex\" : \"persons/alice\", \n  \"graphName\" : \"knows_graph\", \n  \"direction\" : \"any\", \n  \"uniqueness\" : { \n    \"vertices\" : \"none\", \n    \"edges\" : \"none\" \n  }, \n  \"maxIterations\" : 5 \n}\nEOF\n\nHTTP/1.1 500 Internal Server Error\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"error\" : true, \n  \"code\" : 500, \n  \"errorNum\" : 1909, \n  \"errorMessage\" : \"too many iterations - try increasing the value of 'maxIterations'\" \n}\n

\n

", + "parameters": [ + { + "in": "body", + "name": "Json Post Body", + "required": true, + "schema": { + "$ref": "#/definitions/JSF_HTTP_API_TRAVERSAL" + }, + "x-description-offset": 222 + } + ], + "responses": { + "200": { + "description": "If the traversal is fully executed HTTP 200 will be returned.
" + }, + "400": { + "description": "If the traversal specification is either missing or malformed, the server will respond with HTTP 400.
" + }, + "404": { + "description": "The server will responded with HTTP 404 if the specified edge collection does not exist, or the specified start vertex cannot be found.
" + }, + "500": { + "description": "The server will responded with HTTP 500 when an error occurs inside the traversal or if a traversal performs more than maxIterations iterations.
" + } + }, + "summary": "executes a traversal", + "tags": [ + "Graph Traversal" + ], + "x-examples": [], + "x-filename": "Graph Traversal - js/actions/api-traversal.js" + } + }, + "/_api/user": { + "post": { + "description": "\n\n
The following data need to be passed in a JSON representation in the body of the POST request:
  • user: The name of the user as a string. This is mandatory.
  • passwd: The user password as a string. If no password is specified, the empty string will be used. If you pass the special value ARANGODB_DEFAULT_ROOT_PASSWORD, the password will be set to the value stored in the environment variable `ARANGODB_DEFAULT_ROOT_PASSWORD`. This can be used to pass an instance variable into ArangoDB. For example, the instance identifier from Amazon.
  • active: An optional flag that specifies whether the user is active. If not specified, this will default to true.
  • extra: An optional JSON object with arbitrary extra data about the user
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, this will default to false. If set to true, the only operations allowed are PUT /_api/user or PATCH /_api/user. All other operations executed by the user will result in an HTTP 403.
If the user can be added by the server, the server will respond with HTTP 201. In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [], + "responses": { + "201": { + "description": "Returned if the user can be added by the server
" + }, + "400": { + "description": "If the JSON representation is malformed or mandatory data is missing from the request.

" + } + }, + "summary": " Create User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/user/": { + "get": { + "description": "\n\n
Fetches data about all users.
The call will return a JSON object with at least the following attributes on success:
  • user: The name of the user as a string.
  • active: An optional flag that specifies whether the user is active.
  • extra: An optional JSON object with arbitrary extra data about the user.
  • changePassword: An optional flag that specifies whether the user must change the password or not.
", + "parameters": [], + "responses": { + "200": { + "description": "The users that were found

" + } + }, + "summary": " List available Users", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/user/{user}": { + "delete": { + "description": "\n\n
Removes an existing user, identified by user.
If the user can be removed, the server will respond with HTTP 202. In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "202": { + "description": "Is returned if the user was removed by the server
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Remove User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "get": { + "description": "\n\n
Fetches data about the specified user.
The call will return a JSON object with at least the following attributes on success:
  • user: The name of the user as a string.
  • active: An optional flag that specifies whether the user is active.
  • extra: An optional JSON object with arbitrary extra data about the user.
  • changePassword: An optional flag that specifies whether the user must change the password or not.
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "The user was found
" + }, + "404": { + "description": "The user with the specified name does not exist

" + } + }, + "summary": " Fetch User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "patch": { + "description": "\n\n
Partially updates the data of an existing user. The name of an existing user must be specified in user.
The following data can be passed in a JSON representation in the body of the POST request:
  • passwd: The user password as a string. Specifying a password is optional. If not specified, the previously existing value will not be modified.
  • active: An optional flag that specifies whether the user is active. If not specified, the previously existing value will not be modified.
  • extra: An optional JSON object with arbitrary extra data about the user. If not specified, the previously existing value will not be modified.
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, the previously existing value will not be modified.
If the user can be updated by the server, the server will respond with HTTP 200.
In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server
" + }, + "400": { + "description": "The JSON representation is malformed or mandatory data is missing from the request
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Update User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + }, + "put": { + "description": "\n\n
Replaces the data of an existing user. The name of an existing user must be specified in user.
The following data can be passed in a JSON representation in the body of the POST request:
  • passwd: The user password as a string. Specifying a password is mandatory, but the empty string is allowed for passwords
  • active: An optional flag that specifies whether the user is active. If not specified, this will default to true
  • extra: An optional JSON object with arbitrary extra data about the user
  • changePassword: An optional flag that specifies whether the user must change the password or not. If not specified, this will default to false
If the user can be replaced by the server, the server will respond with HTTP 200.
In case of success, the returned JSON object has the following properties:
  • error: Boolean flag to indicate that an error occurred (false in this case)
  • code: The HTTP status code
In case of error, the body of the response will contain a JSON object with additional error details. The object has the following attributes:
  • error: Boolean flag to indicate that an error occurred (true in this case)
  • code: The HTTP status code
  • errorNum: The server error number
  • errorMessage: A descriptive error message
", + "parameters": [ + { + "description": "The name of the user
", + "format": "string", + "in": "path", + "name": "user", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server
" + }, + "400": { + "description": "The JSON representation is malformed or mandatory data is missing from the request
" + }, + "404": { + "description": "The specified user does not exist

" + } + }, + "summary": " Replace User", + "tags": [ + "User handling" + ], + "x-examples": [], + "x-filename": "User handling - js/actions/_api/user/app.js" + } + }, + "/_api/version": { + "get": { + "description": "\n\nReturns the server name and version number. The response is a JSON object with the following attributes:
**A json document with these Properties is returned:**
  • version: the server version string. The string has the format \"major.*minor.*sub\". major and minor will be numeric, and sub may contain a number or a textual version.
  • details: an optional JSON object with additional details. This is returned only if the details URL parameter is set to true in the request.
  • server: will always contain arango

Example: Return the version information

shell> curl --dump - http://localhost:8529/_api/version\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\" \n}\n

\n
Example: Return the version information with details

shell> curl --dump - http://localhost:8529/_api/version?details=true\n\nHTTP/1.1 200 OK\ncontent-type: application/json; charset=utf-8\n\n{ \n  \"server\" : \"arango\", \n  \"version\" : \"2.7.0-devel\", \n  \"details\" : { \n    \"architecture\" : \"64bit\", \n    \"build-date\" : \"2015-09-25 11:17:39\", \n    \"configure\" : \"'./configure' '--enable-relative' '--enable-maintainer-mode' '--with-backtrace' '--enable-v8-debug' 'CXXFLAGS=-O0 -ggdb -DDEBUG_CLUSTER_COMM' 'CFLAGS=-O0 -ggdb  -DDEBUG_CLUSTER_COMM'\", \n    \"env\" : \"CFLAGS='-O0 -ggdb  -DDEBUG_CLUSTER_COMM' CXXFLAGS='-O0 -ggdb -DDEBUG_CLUSTER_COMM'\", \n    \"fd-client-event-handler\" : \"poll\", \n    \"fd-setsize\" : \"1024\", \n    \"icu-version\" : \"54.1\", \n    \"libev-version\" : \"4.11\", \n    \"maintainer-mode\" : \"true\", \n    \"openssl-version\" : \"OpenSSL 1.0.2 22 Jan 2015\", \n    \"readline-version\" : \"6.3\", \n    \"repository-version\" : \"heads/devel-0-g43dd92bb4716d73c7128478b4a7cdb36fd200421\", \n    \"server-version\" : \"2.7.0-devel\", \n    \"sizeof int\" : \"4\", \n    \"sizeof void*\" : \"8\", \n    \"tcmalloc\" : \"false\", \n    \"v8-version\" : \"4.3.61\", \n    \"mode\" : \"standalone\" \n  } \n}\n

\n
", + "parameters": [ + { + "description": "If set to true, the response will contain a details attribute with additional information about included components and their versions. The attribute names and internals of the details object may vary depending on platform and ArangoDB version.
", + "in": "query", + "name": "details", + "required": false, + "type": "boolean" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "is returned in all cases.
", + "schema": { + "$ref": "#/definitions/JSF_get_api_return_rc_200" + }, + "x-description-offset": 165 + } + }, + "summary": " Return server version", + "tags": [ + "Administration" + ], + "x-examples": [], + "x-filename": "Administration - js/actions/_admin/app.js, js/actions/_admin/routing/app.js, js/actions/_admin/server/app.js, js/actions/_admin/database/app.js, arangod/RestHandler/RestShutdownHandler.cpp, arangod/RestHandler/RestAdminLogHandler.cpp, js/actions/api-tasks.js, js/actions/api-endpoint.js, arangod/RestHandler/RestVersionHandler.cpp, js/actions/api-system.js" + } + } + }, + "schemes": [ + "http" + ], + "swagger": "2.0" +} diff --git a/test-perf/src/main/resources/multi-docs.json b/test-perf/src/main/resources/multi-docs.json new file mode 100644 index 000000000..564038320 --- /dev/null +++ b/test-perf/src/main/resources/multi-docs.json @@ -0,0 +1,719 @@ +[ + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + }, + { + "error": true, + "errorNum": 11, + "code": 500, + "errorMessage": "bla" + }, + { + "properties": { + "doCompact": { + "description": "whether or not the collection will be compacted (default is true)
", + "format": "", + "type": "boolean" + }, + "indexBuckets": { + "description": "The: number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power of 2 and less than or equal to 1024.
For very large collections one should increase this to avoid long pauses when the hash table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. For example, 64 might be a sensible value for a collection with 100 000 000 documents. Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. Changes (see below) are applied when the collection is loaded the next time.
", + "format": "int64", + "type": "integer" + }, + "isSystem": { + "description": "If true, create a system collection. In this case collection-name should start with an underscore. End users should normally create non-system collections only. API implementors may be required to create system collections in very special occasions, but normally a regular collection will do. (The default is false)
", + "format": "", + "type": "boolean" + }, + "isVolatile": { + "description": "If true then the collection data is kept in-memory only and not made persistent. Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, and not for data that cannot be re-created otherwise. (The default is false)
", + "format": "", + "type": "boolean" + }, + "journalSize": { + "description": "The maximal size of a journal or datafile in bytes. The value must be at least `1048576` (1 MiB). (The default is a configuration parameter)
", + "format": "int64", + "type": "integer" + }, + "keyOptions": { + "$ref": "#/definitions/JSF_post_api_collection_opts" + }, + "name": { + "description": "The name of the collection.
", + "type": "string" + }, + "numberOfShards": { + "description": "(The default is 1): in a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless.
", + "format": "int64", + "type": "integer" + }, + "shardKeys": { + "description": "(The default is [ \"_key\" ]): in a cluster, this attribute determines which document attributes are used to determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
", + "type": "string" + }, + "type": { + "description": "(The default is 2): the type of the collection to create. The following values for type are valid:
  • 2: document collection
  • 3: edges collection
", + "format": "int64", + "type": "integer" + }, + "waitForSync": { + "description": "If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
", + "format": "", + "type": "boolean" + } + }, + "_rev": "uuu", + "_key": "iii", + "_id": "ooo/iii", + "required": [ + "name" + ], + "type": "object", + "x-filename": "Collections - js/actions/_api/collection/app.js" + } +] \ No newline at end of file diff --git a/test-resilience/pom.xml b/test-resilience/pom.xml index fbd7efdd1..3faff10a3 100644 --- a/test-resilience/pom.xml +++ b/test-resilience/pom.xml @@ -6,16 +6,12 @@ ../test-parent com.arangodb test-parent - 7.10.0 + 7.23.0 4.0.0 test-resilience - - true - - org.mock-server @@ -26,13 +22,7 @@ eu.rekawek.toxiproxy toxiproxy-java - 2.1.7 - test - - - org.awaitility - awaitility - 4.2.0 + 2.1.11 test @@ -48,7 +38,7 @@ io.netty netty-bom - 4.1.93.Final + 4.1.125.Final pom import diff --git a/test-resilience/src/test/java/resilience/MockTest.java b/test-resilience/src/test/java/resilience/MockTest.java new file mode 100644 index 000000000..c75ecc27a --- /dev/null +++ b/test-resilience/src/test/java/resilience/MockTest.java @@ -0,0 +1,40 @@ +package resilience; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDB; +import com.arangodb.Protocol; +import com.arangodb.internal.net.Communication; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.mockserver.integration.ClientAndServer; + +import java.util.Collections; + +import static org.mockserver.integration.ClientAndServer.startClientAndServer; + +public class MockTest extends SingleServerTest { + + protected ClientAndServer mockServer; + protected ArangoDB arangoDB; + + public MockTest() { + super(Collections.singletonMap(Communication.class, Level.DEBUG)); + } + + @BeforeEach + void before() { + mockServer = startClientAndServer(getEndpoint().getHost(), getEndpoint().getPort()); + arangoDB = new ArangoDB.Builder() + .protocol(Protocol.HTTP_JSON) + .password(PASSWORD) + .host("127.0.0.1", mockServer.getPort()) + .build(); + } + + @AfterEach + void after() { + arangoDB.shutdown(); + mockServer.stop(); + 
} + +} diff --git a/test-resilience/src/test/java/resilience/mock/SerdeTest.java b/test-resilience/src/test/java/resilience/mock/SerdeTest.java new file mode 100644 index 000000000..c0285b4be --- /dev/null +++ b/test-resilience/src/test/java/resilience/mock/SerdeTest.java @@ -0,0 +1,166 @@ +package resilience.mock; + +import ch.qos.logback.classic.Level; +import com.arangodb.ArangoDBException; +import com.arangodb.Request; +import com.arangodb.Response; +import com.arangodb.entity.MultiDocumentEntity; +import com.arangodb.util.RawJson; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonNode; +import org.junit.jupiter.api.Test; +import resilience.MockTest; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.mockserver.model.HttpRequest.request; +import static org.mockserver.model.HttpResponse.response; + +public class SerdeTest extends MockTest { + + @Test + void unparsableData() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withBody("upstream timed out") + ); + + logs.reset(); + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("[Unparsable data]") + .hasMessageContaining("Response: {statusCode=504,"); + Throwable[] suppressed = thrown.getCause().getSuppressed(); + assertThat(suppressed).hasSize(1); + assertThat(suppressed[0]) + .isInstanceOf(ArangoDBException.class) + .cause() + .isInstanceOf(JsonParseException.class); + assertThat(logs.getLogs()) + .filteredOn(e -> e.getLevel().equals(Level.DEBUG)) + .anySatisfy(e -> assertThat(e.getFormattedMessage()) + .contains("Received Response") + .contains("statusCode=504") 
+ .contains("[Unparsable data]") + ); + } + + @Test + void textPlainData() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withHeader("Content-Type", "text/plain") + .withBody("upstream timed out") + ); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("upstream timed out"); + } + + @Test + void textPlainDataWithCharset() { + arangoDB.getVersion(); + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/.*/_api/version") + ) + .respond( + response() + .withStatusCode(504) + .withHeader("Content-Type", "text/plain; charset=utf-8") + .withBody("upstream timed out") + ); + + Throwable thrown = catchThrowable(() -> arangoDB.getVersion()); + assertThat(thrown) + .isInstanceOf(ArangoDBException.class) + .hasMessageContaining("upstream timed out"); + } + + @Test + void getDocumentsWithErrorField() { + List keys = Arrays.asList("1", "2", "3"); + + String resp = "[" + + "{\"error\":true,\"_key\":\"1\",\"_id\":\"col/1\",\"_rev\":\"_i4otI-q---\"}," + + "{\"_key\":\"2\",\"_id\":\"col/2\",\"_rev\":\"_i4otI-q--_\"}," + + "{\"_key\":\"3\",\"_id\":\"col/3\",\"_rev\":\"_i4otI-q--A\"}" + + "]"; + + mockServer + .when( + request() + .withMethod("PUT") + .withPath("/.*/_api/document/col") + .withQueryStringParameter("onlyget", "true") + ) + .respond( + response() + .withStatusCode(200) + .withHeader("Content-Type", "application/json; charset=utf-8") + .withBody(resp.getBytes(StandardCharsets.UTF_8)) + ); + + MultiDocumentEntity res = arangoDB.db().collection("col").getDocuments(keys, JsonNode.class); + assertThat(res.getErrors()).isEmpty(); + assertThat(res.getDocuments()).hasSize(3) + .anySatisfy(d -> assertThat(d.get("_key").textValue()).isEqualTo("1")) + .anySatisfy(d -> assertThat(d.get("_key").textValue()).isEqualTo("2")) + .anySatisfy(d -> 
assertThat(d.get("_key").textValue()).isEqualTo("3")); + } + + @Test + void getXArangoDumpJsonLines() { + String resp = "{\"a\":1}\n" + + "{\"b\":2}\n" + + "{\"c\":3}"; + + mockServer + .when( + request() + .withMethod("GET") + .withPath("/_db/foo/_api/foo") + ) + .respond( + response() + .withStatusCode(200) + .withHeader("Content-Type", "application/x-arango-dump; charset=utf-8") + .withBody(resp.getBytes(StandardCharsets.UTF_8)) + ); + + Response res = arangoDB.execute(Request.builder() + .method(Request.Method.GET) + .db("foo") + .path("/_api/foo") + .build(), RawJson.class); + assertThat(res.getBody().get()).endsWith("{\"c\":3}"); + } +} diff --git a/test-resilience/src/test/java/resilience/http/MockTest.java b/test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java similarity index 70% rename from test-resilience/src/test/java/resilience/http/MockTest.java rename to test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java index 6cc81a495..358311cf4 100644 --- a/test-resilience/src/test/java/resilience/http/MockTest.java +++ b/test-resilience/src/test/java/resilience/mock/ServiceUnavailableTest.java @@ -1,42 +1,17 @@ -package resilience.http; +package resilience.mock; import ch.qos.logback.classic.Level; -import com.arangodb.ArangoDB; -import com.arangodb.Protocol; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockserver.integration.ClientAndServer; import org.mockserver.matchers.Times; -import resilience.SingleServerTest; +import resilience.MockTest; import java.util.concurrent.ExecutionException; import static org.assertj.core.api.Assertions.assertThat; -import static org.mockserver.integration.ClientAndServer.startClientAndServer; import static org.mockserver.model.HttpRequest.request; import static org.mockserver.model.HttpResponse.response; -class MockTest extends SingleServerTest { - - private ClientAndServer mockServer; - private 
ArangoDB arangoDB; - - @BeforeEach - void before() { - mockServer = startClientAndServer(getEndpoint().getHost(), getEndpoint().getPort()); - arangoDB = new ArangoDB.Builder() - .protocol(Protocol.HTTP_JSON) - .password(PASSWORD) - .host("127.0.0.1", mockServer.getPort()) - .build(); - } - - @AfterEach - void after() { - arangoDB.shutdown(); - mockServer.stop(); - } +class ServiceUnavailableTest extends MockTest { @Test void retryOn503() { @@ -85,4 +60,6 @@ void retryOn503Async() throws ExecutionException, InterruptedException { .filteredOn(e -> e.getLevel().equals(Level.WARN)) .anyMatch(e -> e.getFormattedMessage().contains("Could not connect to host")); } + + } diff --git a/tutorial/README.md b/tutorial/README.md index b7b2c67ac..95edb57f4 100644 --- a/tutorial/README.md +++ b/tutorial/README.md @@ -1,3 +1,4 @@ # ArangoDB Java driver tutorial -Code for ArangoDB [Java driver tutorial](https://university.arangodb.com/courses/java-driver-tutorial-v7/). +This folder contains the code for the +[Java driver tutorial](https://docs.arangodb.com/stable/develop/drivers/java/). diff --git a/tutorial/Tutorial.md b/tutorial/Tutorial.md index f6a386efd..fccf40d45 100644 --- a/tutorial/Tutorial.md +++ b/tutorial/Tutorial.md @@ -1,52 +1,87 @@ -# Tutorial: Java in 10 Minutes +# ArangoDB Java driver -This is a short tutorial with the [Java Driver](https://github.com/arangodb/arangodb-java-driver) and ArangoDB. In less -than 10 minutes you can learn how to use ArangoDB Java driver in Maven and Gradle projects. +The official ArangoDB Java Driver. 
+- Repository: +- [Code examples](https://github.com/arangodb/arangodb-java-driver/tree/main/test-non-functional/src/test/java/example) +- [Reference](reference-version-7/_index.md) (driver setup, serialization, changes in version 7) +- [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) (generated reference documentation) +- [ChangeLog](https://github.com/arangodb/arangodb-java-driver/blob/main/ChangeLog.md) + +## Supported versions + +Version 7 is the latest supported and actively developed release. + +The driver is compatible with all supported stable versions of ArangoDB server, see +[Product Support End-of-life Announcements](https://arangodb.com/subscriptions/end-of-life-notice/). + +The driver is compatible with JDK 8 and higher versions. + +{{< warning >}} +Version 6 reached End of Life (EOL) and is not actively developed anymore. +Upgrading to version 7 is recommended. + +The API changes between version 6 and 7 are documented in +[Changes in version 7](reference-version-7/changes-in-version-7.md). +{{< /warning >}} ## Project configuration -To use the ArangoDB Java driver, you need to import -[arangodb-java-driver](https://github.com/arangodb/arangodb-java-driver) -as a library into your project. +To use the ArangoDB Java driver, you need to import `arangodb-java-driver` as a +library into your project. This is described below for the popular Java build +automation systems Maven and Gradle. + +### Maven -In a Maven project, you need to add the following dependency to `pom.xml`: +To add the driver to your project with Maven, add the following code to your +`pom.xml` (substitute `7.x.x` with the latest driver version): ```xml - - com.arangodb - arangodb-java-driver - ... 
- + + com.arangodb + arangodb-java-driver + 7.x.x + ``` -In a Gradle project, you need to add the following to `build.gradle`: +### Gradle + +To add the driver to your project with Gradle, add the following code to your +`build.gradle` (substitute `7.x.x` with the latest driver version): ```groovy +repositories { + mavenCentral() +} + dependencies { - implementation 'com.arangodb:arangodb-java-driver:...' + implementation 'com.arangodb:arangodb-java-driver:7.x.x' } ``` +## Tutorial -## Connection +### Connect to ArangoDB -Let's configure and open a connection to start ArangoDB. +Let's configure and open a connection to ArangoDB. The default connection is to +`127.0.0.1:8529`. Change the connection details to point to your specific instance. ```java ArangoDB arangoDB = new ArangoDB.Builder() .host("localhost", 8529) + .user("root") + .password("") .build(); ``` -> **Hint:** The default connection is to 127.0.0.1:8529. +For more connections options and details, see +[Driver setup](reference-version-7/driver-setup.md). +### Create a database -## Creating a database - -Let’s create a new database: +Let's create a new database: ```java ArangoDatabase db = arangoDB.db("mydb"); @@ -54,10 +89,9 @@ System.out.println("Creating database..."); db.create(); ``` +### Create a collection -## Creating a collection - -Now let’s create our first collection: +Now let's create our first collection: ```java ArangoCollection collection = db.collection("firstCollection"); @@ -65,14 +99,13 @@ System.out.println("Creating collection..."); collection.create(); ``` +### Create a document -## Creating a document - -Now we create a document in the collection. Any object can be added as a document to the database and be retrieved from -the database as an object. +Let's create a document in the collection. Any object can be added as a document +to the database and be retrieved from the database as an object. -For this example we use the class BaseDocument, provided with the driver. 
The attributes of the document are stored in a -map as key/value pair: +This example uses the `BaseDocument` class, provided with the driver. The +attributes of the document are stored in a map as `key`/`value` pair: ```java String key = "myKey"; @@ -85,14 +118,13 @@ collection.insertDocument(doc); Some details you should know about the code: -- the document key is passed to the `BaseDocument` constructor -- `addAttribute()` puts a new key/value pair into the document -- each attribute is stored as a single key value pair in the document root +- The document key is passed to the `BaseDocument` constructor +- The `addAttribute()` method puts a new key/value pair into the document +- Each attribute is stored as a single key value pair in the document root +### Read a document -## Read a document - -To read the created document: +Read the created document: ```java System.out.println("Reading document..."); @@ -102,7 +134,7 @@ System.out.println("Attribute a: " + readDocument.getAttribute("a")); System.out.println("Attribute b: " + readDocument.getAttribute("b")); ``` -After executing this program the console output should be: +After executing this program, the console output should be: ```text Key: myKey @@ -112,12 +144,14 @@ Attribute b: 42 Some details you should know about the code: -- `getDocument()` reads the stored document data and deserilizes it into the given class (`BaseDocument`) - +- The `getDocument()` method reads the stored document data and deserializes it + into the given class (`BaseDocument`) -## Creating a document from Jackson JsonNode +### Create a document from Jackson JsonNode -We can also create a document from a Jackson [JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html) object: +You can also create a document from a Jackson +[JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html) +object: ```java 
System.out.println("Creating a document from Jackson JsonNode..."); @@ -130,10 +164,10 @@ System.out.println("Inserting document from Jackson JsonNode..."); collection.insertDocument(jsonNode); ``` +### Read a document as Jackson JsonNode -## Read a document as Jackson JsonNode - -Documents can also be read as Jackson [JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html): +You can also read a document as a Jackson +[JsonNode](https://fasterxml.github.io/jackson-databind/javadoc/2.13/com/fasterxml/jackson/databind/JsonNode.html): ```java System.out.println("Reading document as Jackson JsonNode..."); @@ -143,7 +177,7 @@ System.out.println("Attribute a: " + readJsonNode.get("a").textValue()); System.out.println("Attribute b: " + readJsonNode.get("b").intValue()); ``` -After executing this program the console output should be: +After executing this program, the console output should be: ```text Key: myKey @@ -153,12 +187,12 @@ Attribute b: 53 Some details you should know about the code: -- `getDocument()` returns the stored document as instance of `com.fasterxml.jackson.databind.JsonNode`. - +- The `getDocument()` method returns the stored document as instance of + `com.fasterxml.jackson.databind.JsonNode`. 
-## Creating a document from JSON String +### Create a document from JSON String -Documents can also be created from raw JSON strings: +You can also create a document from raw JSON string: ```java System.out.println("Creating a document from JSON String..."); @@ -168,9 +202,9 @@ System.out.println("Inserting document from JSON String..."); collection.insertDocument(json); ``` -## Read a document as JSON String +### Read a document as JSON String -Documents can also be read as raw JSON strings: +You can also read a document as raw JSON string: ```java System.out.println("Reading document as JSON String..."); @@ -178,14 +212,13 @@ RawJson readJson = collection.getDocument(keyJson, RawJson.class); System.out.println(readJson.get()); ``` -After executing this program the console output should be: +After executing this program, the console output should be: ```text {"_key":"myJsonKey","_id":"firstCollection/myJsonKey","_rev":"_e0nEe2y---","a":"Baz","b":64} ``` - -## Update a document +### Update a document Let's update the document: @@ -195,10 +228,9 @@ System.out.println("Updating document ..."); collection.updateDocument(key, doc); ``` +### Read the document again -## Read the document again - -Let’s read the document again: +Let's read the document again: ```java System.out.println("Reading updated document ..."); @@ -209,7 +241,7 @@ System.out.println("Attribute b: " + updatedDocument.getAttribute("b")); System.out.println("Attribute c: " + updatedDocument.getAttribute("c")); ``` -After executing this program the console output should look like this: +After executing this program, the console output should look like this: ```text Key: myKey @@ -218,20 +250,19 @@ Attribute b: 42 Attribute c: Bar ``` +### Delete a document -## Delete a document - -Let’s delete a document: +Let's delete a document: ```java System.out.println("Deleting document ..."); collection.deleteDocument(key); ``` +### Execute AQL queries -## Execute AQL queries - -First we need to create some 
documents with the name Homer in collection firstCollection: +First, you need to create some documents with the name `Homer` in the +collection called `firstCollection`: ```java for (int i = 0; i < 10; i++) { @@ -241,7 +272,8 @@ for (int i = 0; i < 10; i++) { } ``` -Get all documents with the name Homer from collection firstCollection and iterate over the result: +Get all documents with the name `Homer` from the collection using an AQL query +and iterate over the results: ```java String query = "FOR t IN firstCollection FILTER t.name == @name RETURN t"; @@ -251,7 +283,7 @@ ArangoCursor cursor = db.query(query, bindVars, null, BaseDocument cursor.forEach(aDocument -> System.out.println("Key: " + aDocument.getKey())); ``` -After executing this program the console output should look something like this: +After executing this program, the console output should look something like this: ```text Key: 1 @@ -268,14 +300,14 @@ Key: 6 Some details you should know about the code: -- the AQL query uses the placeholder `@name` which has to be bind to a value -- `query()` executes the defined query and returns a `ArangoCursor` with the given class (here: `BaseDocument`) -- the order is not guaranteed - +- The AQL query uses the placeholder `@name` that has to be bound to a value +- The `query()` method executes the defined query and returns an `ArangoCursor` + with the given class (here: `BaseDocument`) +- The order of is not guaranteed -## Delete a document with AQL +### Delete documents with AQL -Now we will delete the document created before: +Delete previously created documents: ```java String query = "FOR t IN firstCollection FILTER t.name == @name " @@ -286,7 +318,7 @@ ArangoCursor cursor = db.query(query, bindVars, null, BaseDocument cursor.forEach(aDocument -> System.out.println("Removed document " + aDocument.getKey())); ``` -After executing this program the console output should look something like this: +After executing this program, the console output should look 
something like this: ```text Removed document: 1 @@ -301,7 +333,142 @@ Removed document: 8 Removed document: 6 ``` -## Learn more +### Learn more + +- Have a look at the [AQL documentation](../../../aql/) to lear about the + query language +- See [Serialization](reference-version-7/serialization.md) for details about + user-data serde +- For the full reference documentation, see + [JavaDoc](https://www.javadoc.io/doc/com.arangodb/arangodb-java-driver/latest/index.html) + +## GraalVM Native Image + +The driver supports GraalVM Native Image compilation. +To compile with `--link-at-build-time` when `http-protocol` module is present in +the classpath, additional substitutions are required for transitive dependencies +`Netty` and `Vert.x`. See this +[example](https://github.com/arangodb/arangodb-java-driver/tree/main/test-functional/src/test-default/java/graal) +for reference. Such substitutions are not required when compiling the shaded driver. + +### Framework compatibility + +The driver can be used in the following frameworks that support +GraalVM Native Image generation: + +- [Quarkus](https://quarkus.io), see [arango-quarkus-native-example](https://github.com/arangodb-helper/arango-quarkus-native-example) +- [Helidon](https://helidon.io), see [arango-helidon-native-example](https://github.com/arangodb-helper/arango-helidon-native-example) +- [Micronaut](https://micronaut.io), see [arango-micronaut-native-example](https://github.com/arangodb-helper/arango-micronaut-native-example) + +## ArangoDB Java Driver Shaded + +A shaded variant of the driver is also published with +Maven coordinates: `com.arangodb:arangodb-java-driver-shaded`. + +It bundles and relocates the following packages: +- `com.fasterxml.jackson` +- `com.arangodb.jackson.dataformat.velocypack` +- `io.vertx` +- `io.netty` + +Note that the **internal serde** internally uses Jackson classes from +`com.fasterxml.jackson` that are relocated to `com.arangodb.shaded.fasterxml.jackson`. 
+Therefore, the **internal serde** of the shaded driver is not compatible with +Jackson annotations and modules from package`com.fasterxml.jackson`, but only +with their relocated variants. In case the **internal serde** is used as +**user-data serde**, the annotations from package `com.arangodb.serde` can be +used to annotate fields, parameters, getters and setters for mapping values +representing ArangoDB documents metadata (`_id`, `_key`, `_rev`, `_from`, `_to`): +- `@InternalId` +- `@InternalKey` +- `@InternalRev` +- `@InternalFrom` +- `@InternalTo` + +These annotations are compatible with relocated Jackson classes. +Note that the **internal serde** is not part of the public API and could change +in future releases without notice, thus breaking client applications relying on +it to serialize or deserialize user-data. It is therefore recommended also in +this case either: +- using the default user-data serde `JacksonSerde` + (from packages `com.arangodb:jackson-serde-json` or `com.arangodb:jackson-serde-vpack`), or +- providing a custom user-data serde implementation via `ArangoDB.Builder.serde(ArangoSerde)`. + +## Support for extended naming constraints + +The driver supports ArangoDB's **extended** naming constraints/convention, +allowing most UTF-8 characters in the names of: +- Databases +- Collections +- Views +- Indexes + +These names must be NFC-normalized, otherwise the server returns an error. 
+To normalize a string, use the function +`com.arangodb.util.UnicodeUtils.normalize(String): String`: + +```java +String normalized = UnicodeUtils.normalize("π”Έπ•£π•’π•Ÿπ•˜π• π”»π”Ή"); +``` + +To check if a string is already normalized, use the +function `com.arangodb.util.UnicodeUtils.isNormalized(String): boolean`: + +```java +boolean isNormalized = UnicodeUtils.isNormalized("π”Έπ•£π•’π•Ÿπ•˜π• π”»π”Ή"); +``` + +## Async API + +The asynchronous API is accessible via `ArangoDB#async()`, for example: + +```java +ArangoDB adb = new ArangoDB.Builder() + // ... + .build(); +ArangoDBAsync adbAsync = adb.async(); +CompletableFuture version = adbAsync.getVersion(); +// ... +``` + +Under the hood, both synchronous and asynchronous API use the same internal +communication layer, which has been reworked and re-implemented in an +asynchronous way. The synchronous API blocks and waits for the result, while the +asynchronous one returns a `CompletableFuture<>` representing the pending +operation being performed. +Each asynchronous API method is equivalent to the corresponding synchronous +variant, except for the Cursor API. + +### Async Cursor API + +The Cursor API (`ArangoCursor` and `ArangoCursorAsync`) is intrinsically different, +because the synchronous Cursor API is based on Java's `java.util.Iterator`, which +is an interface only suitable for synchronous scenarios. +On the other side, the asynchronous Cursor API provides a method +`com.arangodb.ArangoCursorAsync#nextBatch()`, which returns a +`CompletableFuture>` and can be used to consume the next +batch of the cursor, for example: + +```java +CompletableFuture> future1 = adbAsync.db() + .query("FOR i IN i..10000", Integer.class); +CompletableFuture> future2 = future1 + .thenCompose(c -> { + List batch = c.getResult(); + // ... + // consume batch + // ... + return c.nextBatch(); + }); +// ... 
+``` + +## Data Definition Classes + +Classes used to exchange data definitions, in particular classes in the packages +`com.arangodb.entity.**` and `com.arangodb.model.**`, are meant to be serialized +and deserialized internally by the driver. -- Have a look at the [AQL documentation](https://docs.arangodb.com/stable/aql/) to learn more about the query language. -- Also check out the documentation about ArangoDB's [Data Models](https://docs.arangodb.com/stable/concepts/data-models/) +The behavior to serialize and deserialize these classes is considered an internal +implementation detail, and as such, it might change without prior notice. +The API with regard to the public members of these classes is kept compatible. diff --git a/tutorial/gradle/build.gradle b/tutorial/gradle/build.gradle index 39fe2fa06..19f345244 100644 --- a/tutorial/gradle/build.gradle +++ b/tutorial/gradle/build.gradle @@ -12,7 +12,7 @@ repositories { } dependencies { - implementation 'com.arangodb:arangodb-java-driver:7.10.0' + implementation 'com.arangodb:arangodb-java-driver:7.23.0' } ext { diff --git a/tutorial/maven/pom.xml b/tutorial/maven/pom.xml index 7f8f634f5..14bc5457a 100644 --- a/tutorial/maven/pom.xml +++ b/tutorial/maven/pom.xml @@ -19,7 +19,7 @@ com.arangodb arangodb-java-driver - 7.10.0 + 7.23.0 diff --git a/vst-protocol/pom.xml b/vst-protocol/pom.xml index 19f131b6c..280039898 100644 --- a/vst-protocol/pom.xml +++ b/vst-protocol/pom.xml @@ -5,10 +5,9 @@ 4.0.0 - ../release-parent com.arangodb - release-parent - 7.10.0 + arangodb-java-driver-parent + 7.23.0 vst-protocol @@ -17,7 +16,6 @@ com.arangodb.vst - false diff --git a/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java b/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java index f0faca44f..1db7852a0 100644 --- a/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java +++ b/vst-protocol/src/main/java/com/arangodb/vst/VstConnectionFactoryAsync.java @@ -25,6 
+25,7 @@ import com.arangodb.internal.config.ArangoConfig; import com.arangodb.internal.net.Connection; import com.arangodb.internal.net.ConnectionFactory; +import com.arangodb.internal.net.ConnectionPool; import com.arangodb.vst.internal.VstConnectionAsync; /** @@ -35,8 +36,8 @@ public class VstConnectionFactoryAsync implements ConnectionFactory { @Override @UnstableApi - public Connection create(@UnstableApi final ArangoConfig config, final HostDescription host) { - return new VstConnectionAsync(config, host); + public Connection create(@UnstableApi final ArangoConfig config, final HostDescription host, @UnstableApi final ConnectionPool pool) { + return new VstConnectionAsync(config, host, pool); } } diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java index ddd886d10..870051fa6 100644 --- a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnection.java @@ -25,6 +25,7 @@ import com.arangodb.internal.ArangoDefaults; import com.arangodb.internal.config.ArangoConfig; import com.arangodb.internal.net.Connection; +import com.arangodb.internal.net.ConnectionPool; import com.arangodb.velocypack.VPackBuilder; import com.arangodb.velocypack.VPackSlice; import com.arangodb.velocypack.ValueType; @@ -34,7 +35,6 @@ import javax.net.SocketFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLSocket; -import javax.net.ssl.SSLSocketFactory; import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; @@ -68,6 +68,7 @@ public abstract class VstConnection implements Connection { private final HostDescription host; private final Map sendTimestamps = new ConcurrentHashMap<>(); private final String connectionName; + private final ConnectionPool pool; private final byte[] keepAliveRequest = new VPackBuilder() .add(ValueType.ARRAY) .add(1) @@ -89,7 
+90,7 @@ public abstract class VstConnection implements Connection { private OutputStream outputStream; private InputStream inputStream; - protected VstConnection(final ArangoConfig config, final HostDescription host) { + protected VstConnection(final ArangoConfig config, final HostDescription host, final ConnectionPool pool) { super(); timeout = config.getTimeout(); ttl = config.getConnectionTtl(); @@ -97,6 +98,7 @@ protected VstConnection(final ArangoConfig config, final HostDescription host) { useSsl = config.getUseSsl(); sslContext = config.getSslContext(); this.host = host; + this.pool = pool; connectionName = "connection_" + System.currentTimeMillis() + "_" + Math.random(); LOGGER.debug("[" + connectionName + "]: Connection created"); @@ -147,11 +149,7 @@ public synchronized void open() throws IOException { LOGGER.debug(String.format("[%s]: Open connection to %s", connectionName, host)); } if (Boolean.TRUE.equals(useSsl)) { - if (sslContext != null) { - socket = sslContext.getSocketFactory().createSocket(); - } else { - socket = SSLSocketFactory.getDefault().createSocket(); - } + socket = sslContext.getSocketFactory().createSocket(); } else { socket = SocketFactory.getDefault().createSocket(); } @@ -244,6 +242,11 @@ public synchronized void close() { } } + @Override + public void release() { + pool.release(this); + } + private synchronized void sendProtocolHeader() throws IOException { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("[%s]: Send velocystream protocol header to %s", connectionName, socket)); diff --git a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java index 8b74cbd57..5b128340e 100644 --- a/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java +++ b/vst-protocol/src/main/java/com/arangodb/vst/internal/VstConnectionAsync.java @@ -25,6 +25,7 @@ import com.arangodb.internal.InternalRequest; import 
com.arangodb.internal.InternalResponse; import com.arangodb.internal.config.ArangoConfig; +import com.arangodb.internal.net.ConnectionPool; import com.arangodb.internal.serde.InternalSerde; import com.arangodb.velocypack.VPackSlice; import com.arangodb.velocypack.exception.VPackParserException; @@ -51,8 +52,8 @@ public class VstConnectionAsync extends VstConnection private final InternalSerde serde; - public VstConnectionAsync(final ArangoConfig config, final HostDescription host) { - super(config, host); + public VstConnectionAsync(final ArangoConfig config, final HostDescription host, final ConnectionPool pool) { + super(config, host, pool); chunkSize = config.getChunkSize(); serde = config.getInternalSerde(); } @@ -98,7 +99,7 @@ public CompletableFuture executeAsync(final InternalRequest re return; } rfuture.complete(response); - } else { + } else { Throwable e = ex instanceof CompletionException ? ex.getCause() : ex; rfuture.completeExceptionally(e); }