From 8de5445d75c74350c380589739967df48359b81b Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 15 Dec 2025 06:03:52 +0000 Subject: [PATCH] Auto-generated API code --- docs/reference/api-reference.md | 143 ++++++++++++++--------------- src/api/api/index.ts | 2 +- src/api/api/ml.ts | 2 +- src/api/types.ts | 154 ++++++++++++++++++-------------- 4 files changed, 163 insertions(+), 138 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index 7d7ef29bb..fe869f90b 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -992,6 +992,7 @@ PUT my-index-000001/_doc/1?version=2&version_type=external "id": "elkbee" } } +``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). @@ -1678,11 +1679,11 @@ client.searchMvt({ index, field, zoom, x, y }) #### Request (object) [_request_search_mvt] -- **`index` (string \| string[])**: List of data streams, indices, or aliases to search -- **`field` (string)**: Field containing geospatial data to return -- **`zoom` (number)**: Zoom level for the vector tile to search -- **`x` (number)**: X coordinate for the vector tile to search -- **`y` (number)**: Y coordinate for the vector tile to search +- **`index` (string \| string[])**: A list of indices, data streams, or aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. To search a remote cluster, use the `:` syntax. +- **`field` (string)**: A field that contains the geospatial data to return. It must be a `geo_point` or `geo_shape` field. The field must have doc values enabled. It cannot be a nested field. NOTE: Vector tiles do not natively support geometry collections. For `geometrycollection` values in a `geo_shape` field, the API returns a hits layer feature for each element of the collection. This behavior may change in a future release. +- **`zoom` (number)**: The zoom level of the vector tile to search. It accepts `0` to `29`. +- **`x` (number)**: The X coordinate for the vector tile to search. +- **`y` (number)**: The Y coordinate for the vector tile to search. - **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. - **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. - **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. @@ -2212,21 +2213,23 @@ When the async search completes within the timeout, the response won’t include - **`keep_alive` (Optional, string \| -1 \| 0)**: Specifies how long the async search needs to be available. 
Ongoing async searches and any saved search results are deleted after this period. - **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) - **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout - **`analyzer` (Optional, string)**: The analyzer to use for the query string -- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false) +- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed - **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). - **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value. - **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query (AND or OR) - **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both - **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored -- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests -- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. 
+This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on - **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). @@ -3097,7 +3100,7 @@ client.ccr.forgetFollower({ index }) ### Arguments [_arguments_ccr.forget_follower] #### Request (object) [_request_ccr.forget_follower] -- **`index` (string)**: the name of the leader index for which specified follower retention leases should be removed +- **`index` (string)**: Name of the leader index for which specified follower retention leases should be removed - **`follower_cluster` (Optional, string)** - **`follower_index` (Optional, string)** - **`follower_index_uuid` (Optional, string)** @@ -3246,7 +3249,7 @@ client.ccr.resumeFollow({ index }) ### Arguments [_arguments_ccr.resume_follow] #### Request (object) [_request_ccr.resume_follow] -- **`index` (string)**: The name of the follow index to resume following. +- **`index` (string)**: Name of the follow index to resume following - **`max_outstanding_read_requests` (Optional, number)** - **`max_outstanding_write_requests` (Optional, number)** - **`max_read_request_operation_count` (Optional, number)** @@ -3411,7 +3414,7 @@ client.cluster.getComponentTemplate({ ... }) Wildcard (`*`) expressions are supported. - **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - **`settings_filter` (Optional, string \| string[])**: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys -- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) +- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template - **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. @@ -3641,9 +3644,9 @@ client.cluster.putSettings({ ... }) #### Request (object) [_request_cluster.put_settings] - **`persistent` (Optional, Record)**: The settings that persist after the cluster restarts. - **`transient` (Optional, Record)**: The settings that do not persist after the cluster restarts. -- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) -- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node -- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout +- **`flat_settings` (Optional, boolean)**: Return settings in flat format +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. ## client.cluster.remoteInfo [_cluster.remote_info] Get remote cluster information. @@ -3730,13 +3733,14 @@ client.cluster.state({ ... 
}) ### Arguments [_arguments_cluster.state] #### Request (object) [_request_cluster.state] -- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics. - **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. -- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both +- **`flat_settings` (Optional, boolean)**: Return settings in flat format - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) -- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) +- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node - **`master_timeout` (Optional, string \| -1 \| 0)**: Timeout for waiting for new cluster state in case it is blocked - **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version - **`wait_for_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for wait_for_metadata_version before timing out @@ -4326,8 +4330,8 @@ client.danglingIndices.deleteDanglingIndex({ index_uuid }) #### Request (object) [_request_dangling_indices.delete_dangling_index] - **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID. - **`accept_data_loss` (Optional, boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recove data from the dangling index. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master -- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. ## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index] Import a dangling index. @@ -4347,8 +4351,8 @@ client.danglingIndices.importDanglingIndex({ index_uuid }) - **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID. - **`accept_data_loss` (Optional, boolean)**: This parameter must be set to true to import a dangling index. 
Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. -- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master -- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. ## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices] Get the dangling indices. @@ -4548,7 +4552,8 @@ If false, the sequence query will return successfully, but will always have empt - **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution - **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. @@ -5651,10 +5656,11 @@ client.indices.deleteDataLifecycle({ name }) ### Arguments [_arguments_indices.delete_data_lifecycle] #### Request (object) [_request_indices.delete_data_lifecycle] -- **`name` (string \| string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams +- **`name` (string \| string[])**: A list of data streams of which the data stream lifecycle will be deleted. +Use `*` to get all data streams - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open) -- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master -- **`timeout` (Optional, string \| -1 \| 0)**: Explicit timestamp for the document +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. ## client.indices.deleteDataStream [_indices.delete_data_stream] Delete data streams. 
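Taken together, the delete data lifecycle parameters documented in the hunk above might be combined as in the minimal sketch below; the data stream name and the timeout values are illustrative only and are not taken from the patch.

```ts
// Illustrative call; 'my-data-stream' and the 30s timeouts are made-up values.
const response = await client.indices.deleteDataLifecycle({
  name: 'my-data-stream',   // or '*' to target all data streams
  expand_wildcards: 'open', // expand wildcard expressions to open indices
  master_timeout: '30s',    // period to wait for a connection to the master node
  timeout: '30s'            // period to wait for a response
})
console.log(response)
```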
@@ -5688,10 +5694,11 @@ client.indices.deleteDataStreamOptions({ name }) ### Arguments [_arguments_indices.delete_data_stream_options] #### Request (object) [_request_indices.delete_data_stream_options] -- **`name` (string \| string[])**: A list of data streams of which the data stream options will be deleted; use `*` to get all data streams -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open) -- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master -- **`timeout` (Optional, string \| -1 \| 0)**: Explicit timestamp for the document +- **`name` (string \| string[])**: A list of data streams of which the data stream options will be deleted. +Use `*` to get all data streams +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. ## client.indices.deleteIndexTemplate [_indices.delete_index_template] Delete an index template. @@ -5904,8 +5911,8 @@ client.indices.explainDataLifecycle({ index }) #### Request (object) [_request_indices.explain_data_lifecycle] - **`index` (string \| string[])**: List of index names to explain -- **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle -- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master +- **`include_defaults` (Optional, boolean)**: Indicates if the API should return the default values the system uses for the index's lifecycle +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. ## client.indices.fieldUsageStats [_indices.field_usage_stats] Get field usage stats. @@ -6038,13 +6045,14 @@ client.indices.forcemerge({ ... }) #### Request (object) [_request_indices.forcemerge] - **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
-- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) +- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) -- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) +- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (defayult: dynamic) - **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents -- **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed. +- **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed ## client.indices.get [_indices.get] Get index information. @@ -6510,7 +6518,7 @@ client.indices.promoteDataStream({ name }) ### Arguments [_arguments_indices.promote_data_stream] #### Request (object) [_request_indices.promote_data_stream] -- **`name` (string)**: The name of the data stream +- **`name` (string)**: The name of the data stream to promote - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ## client.indices.putAlias [_indices.put_alias] @@ -6735,7 +6743,7 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning. - **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`cause` (Optional, string)**: User defined reason for creating/updating the index template +- **`cause` (Optional, string)**: User defined reason for creating or updating the index template ## client.indices.putMapping [_indices.put_mapping] Update field mappings. @@ -6762,7 +6770,8 @@ client.indices.putMapping({ index }) ### Arguments [_arguments_indices.put_mapping] #### Request (object) [_request_indices.put_mapping] -- **`index` (string \| string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. +- **`index` (string \| string[])**: A list of index names the mapping should be added to (supports wildcards). +Use `_all` or omit to add the mapping on all indices. - **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. - **`dynamic` (Optional, Enum("strict" \| "runtime" \| true \| false))**: Controls whether new fields are added dynamically. - **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked @@ -6936,7 +6945,7 @@ To unset a version, replace the template without specifying one. - **`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
-- **`cause` (Optional, string)**: User defined reason for creating/updating the index template +- **`cause` (Optional, string)**: User defined reason for creating or updating the index template ## client.indices.recovery [_indices.recovery] Get index recovery information. @@ -7050,7 +7059,8 @@ client.indices.reloadSearchAnalyzers({ index }) #### Request (object) [_request_indices.reload_search_analyzers] - **`index` (string \| string[])**: A list of index names to reload analyzers for -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable @@ -7544,7 +7554,7 @@ client.indices.stats({ ... }) ### Arguments [_arguments_indices.stats] #### Request (object) [_request_indices.stats] -- **`metric` (Optional, string \| string[])**: Limit the information returned the specific metrics. +- **`metric` (Optional, string \| string[])**: Limit the information returned the specific metrics - **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument @@ -8673,7 +8683,7 @@ Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -- **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) +- **`summary` (Optional, boolean)**: Return pipelines without their definitions ## client.ingest.processorGrok [_ingest.processor_grok] Run a grok processor. @@ -8892,7 +8902,7 @@ client.license.postStartBasic({ ... }) ### Arguments [_arguments_license.post_start_basic] #### Request (object) [_request_license.post_start_basic] -- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) +- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged acknowledge messages - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -8915,8 +8925,8 @@ client.license.postStartTrial({ ... 
}) ### Arguments [_arguments_license.post_start_trial] #### Request (object) [_request_license.post_start_trial] -- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) -- **`type` (Optional, string)**: The type of trial license to generate (default: "trial") +- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged acknowledge messages +- **`type` (Optional, string)**: The type of trial license to generate - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. ## client.logstash.deletePipeline [_logstash.delete_pipeline] @@ -10679,7 +10689,7 @@ Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. -- **`priority` (Optional, Enum("normal" \| "low"))**: The deployment priority. +- **`priority` (Optional, Enum("normal" \| "low"))**: The deployment priority - **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. - **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases @@ -11076,7 +11086,7 @@ a task from an empty queue) are filtered out. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - **`type` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The type to sample. -- **`sort` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The sort order for 'cpu' type (default: total) +- **`sort` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The sort order for 'cpu' type ## client.nodes.info [_nodes.info] Get node information. @@ -11138,7 +11148,7 @@ client.nodes.stats({ ... }) #### Request (object) [_request_nodes.stats] - **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. -- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. - **`index_metric` (Optional, string \| string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. - **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics. @@ -11162,21 +11172,13 @@ client.nodes.usage({ ... }) ### Arguments [_arguments_nodes.usage] #### Request (object) [_request_nodes.usage] -- **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +- **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information. 
+Use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. - **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. A list of the following options: `_all`, `rest_actions`. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## client.project.tags [_project.tags] -Get tags. - -Get the tags that are defined for the project. -```ts -client.project.tags() -``` - - ## client.queryRules.deleteRule [_query_rules.delete_rule] Delete a query rule. @@ -11764,8 +11766,9 @@ client.searchableSnapshots.clearCache({ ... }) #### Request (object) [_request_searchable_snapshots.clear_cache] - **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). -- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. -- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. +(This includes `_all` string or when no indices have been specified) - **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) ## client.searchableSnapshots.mount [_searchable_snapshots.mount] @@ -12180,7 +12183,7 @@ They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and unde NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. -- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.delegatePki [_security.delegate_pki] Delegate PKI authentication. @@ -12288,7 +12291,7 @@ client.security.deleteServiceToken({ namespace, service, name }) - **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - **`service` (string)**: The service name. - **`name` (string)**: The name of the service account token. 
-- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. ## client.security.deleteUser [_security.delete_user] Delete users. @@ -13716,7 +13719,7 @@ client.slm.getLifecycle({ ... }) ### Arguments [_arguments_slm.get_lifecycle] #### Request (object) [_request_slm.get_lifecycle] -- **`policy_id` (Optional, string \| string[])**: List of snapshot lifecycle policies to retrieve +- **`policy_id` (Optional, string \| string[])**: A list of snapshot lifecycle policy identifiers. - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. @@ -15870,8 +15873,8 @@ If both this value and the `throttle_period_in_millis` parameter are specified, - **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. - **`active` (Optional, boolean)**: The initial state of the watch. The default value is `true`, which means the watch is active by default. -- **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term -- **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number +- **`if_primary_term` (Optional, number)**: Only update the watch if the last operation that has changed the watch has the specified primary term +- **`if_seq_no` (Optional, number)**: Only update the watch if the last operation that has changed the watch has the specified sequence number - **`version` (Optional, number)**: Explicit version number for concurrency control ## client.watcher.queryWatches [_watcher.query_watches] @@ -15996,7 +15999,7 @@ client.xpack.info({ ... }) #### Request (object) [_request_xpack.info] - **`categories` (Optional, Enum("build" \| "features" \| "license")[])**: A list of the information categories to include in the response. For example, `build,license,features`. -- **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true +- **`accept_enterprise` (Optional, boolean)**: If used, this otherwise ignored parameter must be set to true - **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. diff --git a/src/api/api/index.ts b/src/api/api/index.ts index e86110ed8..f131d1bf5 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -56,7 +56,7 @@ const acceptedParams: Record/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. 
* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. 
If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. 
NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. + * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. 
You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. 
If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. 
Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 59c8f9d0d..b3ecb4652 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -3556,7 +3556,7 @@ export default class Ml { 'reset_start' ] } - return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** diff --git a/src/api/types.ts b/src/api/types.ts index 89fc975d6..16645f4d5 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -3241,15 +3241,24 @@ export type SearchTotalHitsRelation = 'eq' | 'gte' export type SearchTrackHits = boolean | integer export interface SearchMvtRequest extends RequestBase { - /** Comma-separated list of data streams, indices, or aliases to search */ + /** A list of indices, data streams, or aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. + * To search a remote cluster, use the `:` syntax. */ index: Indices - /** Field containing geospatial data to return */ + /** A field that contains the geospatial data to return. + * It must be a `geo_point` or `geo_shape` field. + * The field must have doc values enabled. It cannot be a nested field. + * + * NOTE: Vector tiles do not natively support geometry collections. + * For `geometrycollection` values in a `geo_shape` field, the API returns a hits layer feature for each element of the collection. + * This behavior may change in a future release. */ field: Field - /** Zoom level for the vector tile to search */ + /** The zoom level of the vector tile to search. It accepts `0` to `29`. */ zoom: SearchMvtZoomLevel - /** X coordinate for the vector tile to search */ + /** The X coordinate for the vector tile to search. */ x: SearchMvtCoordinate - /** Y coordinate for the vector tile to search */ + /** The Y coordinate for the vector tile to search. */ y: SearchMvtCoordinate /** Specifies a subset of projects to target for the search using project * metadata tags in a subset of Lucene query syntax. @@ -10533,13 +10542,14 @@ export interface AsyncSearchSubmitRequest extends RequestBase { keep_alive?: Duration /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ keep_on_completion?: boolean - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
+ * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean /** Indicate if an error should be returned if there is a partial search failure or timeout */ allow_partial_search_results?: boolean /** The analyzer to use for the query string */ analyzer?: string - /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ + /** Specify whether wildcard and prefix queries should be analyzed */ analyze_wildcard?: boolean /** Affects how often partial results become available, which happens whenever shard results are reduced. * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ @@ -10550,7 +10560,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { default_operator?: QueryDslOperator /** The field to use as default where no field prefix is given in the query string */ df?: string - /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + /** Whether to expand wildcard expression to concrete indices that are open, closed or both */ expand_wildcards?: ExpandWildcards /** Whether specified concrete, expanded or aliased indices should be ignored when throttled */ ignore_throttled?: boolean @@ -10558,9 +10568,10 @@ export interface AsyncSearchSubmitRequest extends RequestBase { ignore_unavailable?: boolean /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean - /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ + /** The number of concurrent shard requests per node this search executes concurrently. + * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ max_concurrent_shard_requests?: integer - /** Specify the node or shard the operation should be performed on (default: random) */ + /** Specify the node or shard the operation should be performed on */ preference?: string /** Specifies a subset of projects to target for the search using project * metadata tags in a subset of Lucene query syntax. @@ -10689,7 +10700,7 @@ export interface AutoscalingAutoscalingPolicy { } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { - /** the name of the autoscaling policy */ + /** Name of the autoscaling policy */ name: Name /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ @@ -10746,7 +10757,7 @@ export interface AutoscalingGetAutoscalingCapacityResponse { } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { - /** the name of the autoscaling policy */ + /** Name of the autoscaling policy */ name: Name /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. 
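A minimal sketch of submitting an async search with the parameters described above; the index pattern, query, and timeout values are illustrative and not taken from this patch.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Submit an async search; keep the result even if it finishes within the timeout.
const submitted = await client.asyncSearch.submit({
  index: 'logs-*',
  query: { match_all: {} },
  wait_for_completion_timeout: '2s',
  keep_on_completion: true
})

// If the search is still running, poll its status by id later.
if (submitted.id != null) {
  const status = await client.asyncSearch.status({ id: submitted.id })
  console.log(status.is_running)
}
```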
*/ @@ -10760,7 +10771,7 @@ export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { - /** the name of the autoscaling policy */ + /** Name of the autoscaling policy */ name: Name /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ @@ -15912,7 +15923,7 @@ export interface CcrFollowStatsResponse { } export interface CcrForgetFollowerRequest extends RequestBase { - /** the name of the leader index for which specified follower retention leases should be removed */ + /** Name of the leader index for which specified follower retention leases should be removed */ index: IndexName /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -16056,7 +16067,7 @@ export interface CcrResumeAutoFollowPatternRequest extends RequestBase { export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { - /** The name of the follow index to resume following. */ + /** Name of the follow index to resume following */ index: IndexName /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -16359,7 +16370,7 @@ export interface ClusterGetComponentTemplateRequest extends RequestBase { flat_settings?: boolean /** Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys */ settings_filter?: string | string[] - /** Return all default configurations for the component template (default: false) */ + /** Return all default configurations for the component template */ include_defaults?: boolean /** If `true`, the request retrieves information from the local node only. * If `false`, information is retrieved from the master node. */ @@ -16614,11 +16625,11 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { - /** Return settings in flat format (default: false) */ + /** Return settings in flat format */ flat_settings?: boolean - /** Explicit operation timeout for connection to master node */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Explicit operation timeout */ + /** The period to wait for a response. */ timeout?: Duration /** The settings that persist after the cluster restarts. */ persistent?: Record @@ -16784,19 +16795,20 @@ export interface ClusterRerouteResponse { } export interface ClusterStateRequest extends RequestBase { - /** Limit the information returned to the specified metrics */ + /** Limit the information returned to the specified metrics. */ metric?: Metrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
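For the `ClusterPutSettingsRequest` timeouts and `persistent`/`transient` bodies above, a typical call from the JavaScript client could be sketched as follows; the setting key and value are examples only.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Persistent settings survive a full cluster restart; transient settings do not.
await client.cluster.putSettings({
  persistent: {
    'indices.recovery.max_bytes_per_sec': '50mb'
  },
  flat_settings: true
})
```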
+ * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean - /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + /** Whether to expand wildcard expression to concrete indices that are open, closed or both */ expand_wildcards?: ExpandWildcards - /** Return settings in flat format (default: false) */ + /** Return settings in flat format */ flat_settings?: boolean /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean - /** Return local information, do not retrieve the state from master node (default: false) */ + /** Return local information, do not retrieve the state from master node */ local?: boolean /** Timeout for waiting for new cluster state in case it is blocked */ master_timeout?: Duration @@ -18182,9 +18194,9 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { index_uuid: Uuid /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */ accept_data_loss?: boolean - /** Specify timeout for connection to master */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Explicit operation timeout */ + /** The period to wait for a response. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } @@ -18200,9 +18212,9 @@ export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { /** This parameter must be set to true to import a dangling index. * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ accept_data_loss?: boolean - /** Specify timeout for connection to master */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Explicit operation timeout */ + /** The period to wait for a response. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } @@ -18462,7 +18474,8 @@ export interface EqlGetStatusResponse { export interface EqlSearchRequest extends RequestBase { /** Comma-separated list of index names to scope the operation */ index: Indices - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. + * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean /** Whether to expand wildcard expression to concrete indices that are open, closed or both.
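A hedged sketch of the dangling-index flow referenced above: list the dangling indices, then import one by UUID while acknowledging possible data loss. The cluster address is a placeholder.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// A dangling index can only be imported if the caller accepts that the copy may be stale.
const { dangling_indices } = await client.danglingIndices.listDanglingIndices()
if (dangling_indices.length > 0) {
  await client.danglingIndices.importDanglingIndex({
    index_uuid: dangling_indices[0].index_uuid,
    accept_data_loss: true
  })
}
```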
*/ expand_wildcards?: ExpandWildcards @@ -20707,13 +20720,14 @@ export interface IndicesDeleteAliasRequest extends RequestBase { export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody export interface IndicesDeleteDataLifecycleRequest extends RequestBase { - /** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ + /** A comma-separated list of data streams of which the data stream lifecycle will be deleted. + * Use `*` to get all data streams */ name: DataStreamNames /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards - /** Specify timeout for connection to master */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Explicit timestamp for the document */ + /** The period to wait for a response. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } @@ -20739,13 +20753,14 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase { export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamOptionsRequest extends RequestBase { - /** A comma-separated list of data streams of which the data stream options will be deleted; use `*` to get all data streams */ + /** A comma-separated list of data streams of which the data stream options will be deleted. + * Use `*` to get all data streams */ name: DataStreamNames - /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ + /** Whether wildcard expressions should get expanded to open or closed indices */ expand_wildcards?: ExpandWildcards - /** Specify timeout for connection to master */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration - /** Explicit timestamp for the document */ + /** The period to wait for a response. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never } @@ -20934,9 +20949,9 @@ export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { export interface IndicesExplainDataLifecycleRequest extends RequestBase { /** Comma-separated list of index names to explain */ index: Indices - /** indicates if the API should return the default values the system uses for the index's lifecycle */ + /** Indicates if the API should return the default values the system uses for the index's lifecycle */ include_defaults?: boolean - /** Specify timeout for connection to master */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } @@ -21044,19 +21059,20 @@ export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
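To illustrate the `include_defaults` flag on the data stream lifecycle explain API above, a minimal sketch follows; the backing-index pattern is hypothetical.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Explain the lifecycle of backing indices, including the system defaults.
const explain = await client.indices.explainDataLifecycle({
  index: '.ds-my-data-stream-*',
  include_defaults: true
})
console.log(explain.indices)
```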
(This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. + * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards - /** Specify whether the index should be flushed after performing the operation (default: true) */ + /** Specify whether the index should be flushed after performing the operation */ flush?: boolean /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean - /** The number of segments the index should be merged into (default: dynamic) */ + /** The number of segments the index should be merged into (default: dynamic) */ max_num_segments?: long /** Specify whether the operation should only expunge deleted documents */ only_expunge_deletes?: boolean - /** Should the request wait until the force merge is completed. */ + /** Should the request wait until the force merge is completed */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } @@ -21590,7 +21606,7 @@ export interface IndicesOpenResponse { } export interface IndicesPromoteDataStreamRequest extends RequestBase { - /** The name of the data stream */ + /** The name of the data stream to promote */ name: IndexName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -21820,7 +21836,7 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** User defined reason for creating/updating the index template */ + /** User defined reason for creating or updating the index template */ cause?: string /** Name of the index template to create. */ index_patterns?: Indices @@ -21869,7 +21885,8 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { - /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ + /** A comma-separated list of index names the mapping should be added to (supports wildcards). + * Use `_all` or omit to add the mapping on all indices. */ index: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean @@ -21976,7 +21993,7 @@ export interface IndicesPutTemplateRequest extends RequestBase { /** Period to wait for a connection to the master node. If no response is * received before the timeout expires, the request fails and returns an error.
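A minimal sketch of a force merge using the parameters documented above; merging to a single segment is generally only advisable for indices that no longer receive writes, and the index name is illustrative.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Merge a read-only index down to one segment and block until the merge finishes.
await client.indices.forcemerge({
  index: 'my-index-000001',
  max_num_segments: 1,
  wait_for_completion: true
})
```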
*/ master_timeout?: Duration - /** User defined reason for creating/updating the index template */ + /** User defined reason for creating or updating the index template */ cause?: string /** Aliases for the index. */ aliases?: Record @@ -22166,7 +22183,8 @@ export interface IndicesReloadSearchAnalyzersReloadResult { export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { /** A comma-separated list of index names to reload analyzers for */ index: Indices - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. + * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards @@ -22731,7 +22749,7 @@ export interface IndicesStatsMappingStats { } export interface IndicesStatsRequest extends RequestBase { - /** Limit the information returned the specific metrics. */ + /** Limit the information returned the specific metrics */ metric?: Metrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices @@ -23245,7 +23263,7 @@ export interface InferenceAzureAiStudioServiceSettings { * Note that some providers may support only certain task types. * Supported providers include: * - * * `cohere` - available for `text_embedding` and `completion` task types + * * `cohere` - available for `text_embedding`, `rerank` and `completion` task types * * `databricks` - available for `completion` task type only * * `meta` - available for `completion` task type only * * `microsoft_phi` - available for `completion` task type only @@ -26502,7 +26520,7 @@ export interface IngestGetPipelineRequest extends RequestBase { /** Period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Return pipelines without their definitions (default: false) */ + /** Return pipelines without their definitions */ summary?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } @@ -26731,7 +26749,7 @@ export interface LicensePostResponse { } export interface LicensePostStartBasicRequest extends RequestBase { - /** whether the user has acknowledged acknowledge messages (default: false) */ + /** Whether the user has acknowledged acknowledge messages */ acknowledge?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -26752,9 +26770,9 @@ export interface LicensePostStartBasicResponse { } export interface LicensePostStartTrialRequest extends RequestBase { - /** whether the user has acknowledged acknowledge messages (default: false) */ + /** Whether the user has acknowledged acknowledge messages */ acknowledge?: boolean - /** The type of trial license to generate (default: "trial") */ + /** The type of trial license to generate */ type?: string /** Period to wait for a connection to the master node. 
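For the trial-license parameters above, a sketch of starting a trial with acknowledgement; this call is illustrative and not part of the generated changes.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// `acknowledge: true` accepts the feature changes that come with switching license types.
const res = await client.license.postStartTrial({ acknowledge: true })
console.log(res.trial_was_started)
```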
*/ master_timeout?: Duration @@ -29458,7 +29476,9 @@ export interface MlEvaluateDataFrameRequest extends RequestBase { querystring?: { [key: string]: any } & { evaluation?: never, index?: never, query?: never } } -export interface MlEvaluateDataFrameResponse { +export type MlEvaluateDataFrameResponse = MlEvaluateDataFrameResponseBody + +export interface MlEvaluateDataFrameResponseBody { /** Evaluation results for a classification analysis. * It outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlEvaluateDataFrameDataframeClassificationSummary @@ -31037,7 +31057,7 @@ export interface MlStartTrainedModelDeploymentRequest extends RequestBase { * it will automatically be changed to a value less than the number of hardware threads. * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer - /** The deployment priority. */ + /** The deployment priority */ priority?: MlTrainingPriority /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds * this value, new requests are rejected with a 429 error. */ @@ -32349,7 +32369,7 @@ export interface NodesHotThreadsRequest extends RequestBase { timeout?: Duration /** The type to sample. */ type?: ThreadType - /** The sort order for 'cpu' type (default: total) */ + /** The sort order for 'cpu' type */ sort?: ThreadType /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } @@ -32769,7 +32789,7 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons export interface NodesStatsRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds - /** Limit the information returned to the specified metrics */ + /** Limits the information returned to the specific metrics. */ metric?: Metrics /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ index_metric?: Metrics @@ -32812,7 +32832,8 @@ export interface NodesUsageNodeUsage { } export interface NodesUsageRequest extends RequestBase { - /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ + /** A comma-separated list of node IDs or names to limit the returned information. + * Use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. */ node_id?: NodeIds /** Limits the information returned to the specific metrics. * A comma-separated list of the following options: `_all`, `rest_actions`. */ @@ -33625,9 +33646,10 @@ export interface SearchableSnapshotsClearCacheRequest extends RequestBase { /** A comma-separated list of data streams, indices, and aliases to clear from the cache. * It supports wildcards (`*`). */ index?: Indices - /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + /** Whether to expand wildcard expression to concrete indices that are open, closed or both */ expand_wildcards?: ExpandWildcards - /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
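Since `MlEvaluateDataFrameResponse` is now an alias of the response body type, existing calls keep the same shape. A hedged usage sketch follows; the index and field names are purely hypothetical and must match the destination index of an outlier-detection job.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Evaluate outlier-detection results written to a destination index.
const evaluation = await client.ml.evaluateDataFrame({
  index: 'my-analytics-dest-index',
  evaluation: {
    outlier_detection: {
      actual_field: 'is_outlier',
      predicted_probability_field: 'ml.outlier_score'
    }
  }
})
console.log(evaluation.outlier_detection)
```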
(This includes `_all` string or when no indices have been specified) */ + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. + * (This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean @@ -34391,7 +34413,7 @@ export interface SecurityCreateServiceTokenRequest extends RequestBase { * NOTE: Token names must be unique in the context of the associated service account. * They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ name?: Name - /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } @@ -34514,7 +34536,7 @@ export interface SecurityDeleteServiceTokenRequest extends RequestBase { service: Service /** The name of the service account token. */ name: Name - /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } @@ -36258,7 +36280,7 @@ export interface SlmExecuteRetentionRequest extends RequestBase { export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { - /** Comma-separated list of snapshot lifecycle policies to retrieve */ + /** A comma-separated list of snapshot lifecycle policy identifiers. */ policy_id?: Names /** The period to wait for a connection to the master node. * If no response is received before the timeout expires, the request fails and returns an error. */ @@ -39729,9 +39751,9 @@ export interface WatcherPutWatchRequest extends RequestBase { /** The initial state of the watch. * The default value is `true`, which means the watch is active by default. 
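A sketch of creating a service account token with the documented `refresh` behavior; the namespace/service pair shown is the built-in `elastic/fleet-server` account and the token name is arbitrary.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Create a named token and wait for a refresh so it is immediately visible to search.
const created = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-token',
  refresh: 'wait_for'
})
console.log(created.token.value)
```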
*/ active?: boolean - /** only update the watch if the last operation that has changed the watch has the specified primary term */ + /** Only update the watch if the last operation that has changed the watch has the specified primary term */ if_primary_term?: long - /** only update the watch if the last operation that has changed the watch has the specified sequence number */ + /** Only update the watch if the last operation that has changed the watch has the specified sequence number */ if_seq_no?: SequenceNumber /** Explicit version number for concurrency control */ version?: VersionNumber @@ -39961,7 +39983,7 @@ export interface XpackInfoRequest extends RequestBase { /** A comma-separated list of the information categories to include in the response. * For example, `build,license,features`. */ categories?: XpackInfoXPackCategory[] - /** If this param is used it must be set to true */ + /** If used, this otherwise ignored parameter must be set to true */ accept_enterprise?: boolean /** Defines whether additional human-readable information is included in the response. * In particular, it adds descriptions and a tag line. */
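To show how `if_seq_no` and `if_primary_term` above guard concurrent watch updates, a minimal sketch under the assumption that the watch already exists; the watch id and trigger interval are illustrative.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Read the watch first, then update it only if nothing changed it in between.
const existing = await client.watcher.getWatch({ id: 'my_watch' })

await client.watcher.putWatch({
  id: 'my_watch',
  if_seq_no: existing._seq_no,
  if_primary_term: existing._primary_term,
  active: true,
  trigger: { schedule: { interval: '10m' } }
})
```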