From 945a0f516ad220f915e5e3a6623b6b9b8487c9c9 Mon Sep 17 00:00:00 2001 From: J Stickler Date: Mon, 15 Apr 2024 09:03:56 -0400 Subject: [PATCH 1/9] docs: Update publish-technical-documentation-next.yml (#12617) Signed-off-by: Jack Baldry Co-authored-by: Jack Baldry --- .github/workflows/publish-technical-documentation-next.yml | 1 - .github/workflows/publish-technical-documentation-release.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/publish-technical-documentation-next.yml b/.github/workflows/publish-technical-documentation-next.yml index afa567aa5cd9..b4cf557cc75c 100644 --- a/.github/workflows/publish-technical-documentation-next.yml +++ b/.github/workflows/publish-technical-documentation-next.yml @@ -10,7 +10,6 @@ on: jobs: sync: runs-on: "ubuntu-latest" - needs: "test" steps: - name: "Check out code" uses: "actions/checkout@v4" diff --git a/.github/workflows/publish-technical-documentation-release.yml b/.github/workflows/publish-technical-documentation-release.yml index 046cb6574e7b..d8f17f4d457f 100644 --- a/.github/workflows/publish-technical-documentation-release.yml +++ b/.github/workflows/publish-technical-documentation-release.yml @@ -12,7 +12,6 @@ on: jobs: sync: runs-on: "ubuntu-latest" - needs: "test" steps: - name: "Checkout code and tags" uses: "actions/checkout@v4" From be0388443f9d65a6fe4219f200261feac09fe227 Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Mon, 15 Apr 2024 15:56:17 -0300 Subject: [PATCH 2/9] docs: helm: Update monolithic guide. 
(#12591) Co-authored-by: J Stickler --- .../install/helm/install-monolithic/_index.md | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/sources/setup/install/helm/install-monolithic/_index.md b/docs/sources/setup/install/helm/install-monolithic/_index.md index e85d6a52159b..fcd9e6671625 100644 --- a/docs/sources/setup/install/helm/install-monolithic/_index.md +++ b/docs/sources/setup/install/helm/install-monolithic/_index.md @@ -12,7 +12,7 @@ weight: 100 This Helm Chart installation runs the Grafana Loki *single binary* within a Kubernetes cluster. -If you set the `singleBinary.replicas` value to 1, this chart configures Loki to run the `all` target in a [monolithic mode]({{< relref "../../../../get-started/deployment-modes#monolithic-mode" >}}), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. +If you set the `singleBinary.replicas` value to 1 and set the deployment mode to `SingleBinary`, this chart configures Loki to run the `all` target in a [monolithic mode](https://grafana.com/docs/loki//get-started/deployment-modes/#monolithic-mode), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. If you set the `singleBinary.replicas` value to 2 or more, this chart configures Loki to run a *single binary* in a replicated, highly available mode. When running replicas of a single binary, you must configure object storage. 
**Before you begin: Software Requirements** @@ -39,13 +39,29 @@ If you set the `singleBinary.replicas` value to 2 or more, this chart configures - If running a single replica of Loki, configure the `filesystem` storage: ```yaml + mode: SingleBinary loki: commonConfig: replication_factor: 1 storage: type: 'filesystem' + schemaConfig: + configs: + - from: 2024-01-01 + store: tsdb + index: + prefix: loki_index_ + period: 24h + object_store: filesystem # we're storing on filesystem so there's no real persistence here. + schema: v13 singleBinary: replicas: 1 + read: + replicas: 0 + backend: + replicas: 0 + write: + replicas: 0 ``` - If running Loki with a replication factor greater than 1, set the desired number replicas and provide object storage credentials: @@ -54,6 +70,15 @@ If you set the `singleBinary.replicas` value to 2 or more, this chart configures loki: commonConfig: replication_factor: 3 + schemaConfig: + configs: + - from: 2024-01-01 + store: tsdb + index: + prefix: loki_index_ + period: 24h + object_store: filesystem + schema: v13 storage: bucketNames: chunks: loki-chunks From 7a81d264a4ba54efdb1d79d382fd4188c036aaee Mon Sep 17 00:00:00 2001 From: Joshua Ford Date: Mon, 15 Apr 2024 14:35:48 -0500 Subject: [PATCH 3/9] fix: lambda-promtail, update s3 filename regex to allow finding of log files from AWS GovCloud regions (#12482) --- tools/lambda-promtail/lambda-promtail/s3.go | 4 +- .../lambda-promtail/s3_test.go | 101 ++++++++++++++++++ 2 files changed, 103 insertions(+), 2 deletions(-) diff --git a/tools/lambda-promtail/lambda-promtail/s3.go b/tools/lambda-promtail/lambda-promtail/s3.go index 5dca5cf7d609..77694ba60343 100644 --- a/tools/lambda-promtail/lambda-promtail/s3.go +++ b/tools/lambda-promtail/lambda-promtail/s3.go @@ -75,9 +75,9 @@ var ( // source: https://docs.aws.amazon.com/waf/latest/developerguide/logging-s3.html // format: 
aws-waf-logs-suffix[/prefix]/AWSLogs/aws-account-id/WAFLogs/region/webacl-name/year/month/day/hour/minute/aws-account-id_waflogs_region_webacl-name_timestamp_hash.log.gz // example: aws-waf-logs-test/AWSLogs/11111111111/WAFLogs/us-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz - defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)\_\w+-\w+-\d_(?:(?Papp|net)\.*?)?(?P[a-zA-Z0-9\-]+)`) + defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?Papp|net)\.*?)?(?P[a-zA-Z0-9\-]+)`) defaultTimestampRegex = regexp.MustCompile(`(?P\d+-\d+-\d+T\d+:\d+:\d+(?:\.\d+Z)?)`) - cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?Po-[a-z0-9]{10,32})?\/?(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?.+_(?P[a-zA-Z0-9\-]+)`) + cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?Po-[a-z0-9]{10,32})?\/?(?P\d+)\/(?P[a-zA-Z0-9_\-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)_(?:\w+-\w+-(?:\w+-)?\d)_(?:(?:app|nlb|net)\.*?)?.+_(?P[a-zA-Z0-9\-]+)`) cloudfrontFilenameRegex = regexp.MustCompile(`(?P.*)\/(?P[A-Z0-9]+)\.(?P\d+)-(?P\d+)-(?P\d+)-(.+)`) cloudfrontTimestampRegex = regexp.MustCompile(`(?P\d+-\d+-\d+\s\d+:\d+:\d+)`) wafFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P\d+)\/(?PWAFLogs)\/(?P[\w-]+)\/(?P[\w-]+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/(?P\d+)\/\d+\_waflogs\_[\w-]+_[\w-]+_\d+T\d+Z_\w+`) diff --git a/tools/lambda-promtail/lambda-promtail/s3_test.go b/tools/lambda-promtail/lambda-promtail/s3_test.go index 60a22abba7a3..644ad12f1727 100644 --- a/tools/lambda-promtail/lambda-promtail/s3_test.go +++ 
b/tools/lambda-promtail/lambda-promtail/s3_test.go @@ -126,6 +126,39 @@ func Test_getLabels(t *testing.T) { }, wantErr: false, }, + { + name: "s3_govcloud_flow_logs", + args: args{ + record: events.S3EventRecord{ + AWSRegion: "us-gov-east-1", + S3: events.S3Entity{ + Bucket: events.S3Bucket{ + Name: "vpc_logs_test", + OwnerIdentity: events.S3UserIdentity{ + PrincipalID: "test", + }, + }, + Object: events.S3Object{ + Key: "my-bucket/AWSLogs/123456789012/vpcflowlogs/us-gov-east-1/2022/01/24/123456789012_vpcflowlogs_us-gov-east-1_fl-1234abcd_20180620T1620Z_fe123456.log.gz", + }, + }, + }, + }, + want: map[string]string{ + "account_id": "123456789012", + "bucket": "vpc_logs_test", + "bucket_owner": "test", + "bucket_region": "us-gov-east-1", + "day": "24", + "key": "my-bucket/AWSLogs/123456789012/vpcflowlogs/us-gov-east-1/2022/01/24/123456789012_vpcflowlogs_us-gov-east-1_fl-1234abcd_20180620T1620Z_fe123456.log.gz", + "month": "01", + "region": "us-gov-east-1", + "src": "fl-1234abcd", + "type": FLOW_LOG_TYPE, + "year": "2022", + }, + wantErr: false, + }, { name: "cloudtrail_digest_logs", args: args{ @@ -192,6 +225,39 @@ func Test_getLabels(t *testing.T) { }, wantErr: false, }, + { + name: "cloudtrail_govcloud_logs", + args: args{ + record: events.S3EventRecord{ + AWSRegion: "us-gov-east-1", + S3: events.S3Entity{ + Bucket: events.S3Bucket{ + Name: "cloudtrail_logs_test", + OwnerIdentity: events.S3UserIdentity{ + PrincipalID: "test", + }, + }, + Object: events.S3Object{ + Key: "my-bucket/AWSLogs/123456789012/CloudTrail/us-gov-east-1/2022/01/24/123456789012_CloudTrail_us-gov-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz", + }, + }, + }, + }, + want: map[string]string{ + "account_id": "123456789012", + "bucket": "cloudtrail_logs_test", + "bucket_owner": "test", + "bucket_region": "us-gov-east-1", + "day": "24", + "key": "my-bucket/AWSLogs/123456789012/CloudTrail/us-gov-east-1/2022/01/24/123456789012_CloudTrail_us-gov-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz", + 
"month": "01", + "region": "us-gov-east-1", + "src": "4jhzXFO2Jlvu2b3y", + "type": CLOUDTRAIL_LOG_TYPE, + "year": "2022", + }, + wantErr: false, + }, { name: "organization_cloudtrail_logs", args: args{ @@ -293,6 +359,41 @@ func Test_getLabels(t *testing.T) { }, wantErr: false, }, + { + name: "s3_govcloud_waf", + args: args{ + record: events.S3EventRecord{ + AWSRegion: "us-gov-east-1", + S3: events.S3Entity{ + Bucket: events.S3Bucket{ + Name: "waf_logs_test", + OwnerIdentity: events.S3UserIdentity{ + PrincipalID: "test", + }, + }, + Object: events.S3Object{ + Key: "prefix/AWSLogs/11111111111/WAFLogs/us-gov-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-gov-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz", + }, + }, + }, + }, + want: map[string]string{ + "account_id": "11111111111", + "bucket_owner": "test", + "bucket_region": "us-gov-east-1", + "bucket": "waf_logs_test", + "day": "28", + "hour": "19", + "key": "prefix/AWSLogs/11111111111/WAFLogs/us-gov-east-1/TEST-WEBACL/2021/10/28/19/50/11111111111_waflogs_us-gov-east-1_TEST-WEBACL_20211028T1950Z_e0ca43b5.log.gz", + "minute": "50", + "month": "10", + "region": "us-gov-east-1", + "src": "TEST-WEBACL", + "type": WAF_LOG_TYPE, + "year": "2021", + }, + wantErr: false, + }, { name: "missing_type", args: args{ From 3b0fa184c542969c6c355fd65e33341f62172de3 Mon Sep 17 00:00:00 2001 From: J Stickler Date: Mon, 15 Apr 2024 16:15:22 -0400 Subject: [PATCH 4/9] docs: hide the sizing calculator until updated (#12598) --- docs/sources/setup/_index.md | 1 - docs/sources/setup/install/_index.md | 4 ---- docs/sources/setup/size/_index.md | 2 +- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/sources/setup/_index.md b/docs/sources/setup/_index.md index e1e1caef768f..2464feb75f35 100644 --- a/docs/sources/setup/_index.md +++ b/docs/sources/setup/_index.md @@ -7,7 +7,6 @@ weight: 300 # Setup Loki -- Estimate the initial [size]({{< relref "./size" >}}) for your Loki cluster. 
- [Install]({{< relref "./install" >}}) Loki. - [Migrate]({{< relref "./migrate" >}}) from one Loki implementation to another. - [Upgrade]({{< relref "./upgrade" >}}) from one Loki version to a newer version. diff --git a/docs/sources/setup/install/_index.md b/docs/sources/setup/install/_index.md index 11521f1158ed..2b56cba78cb6 100644 --- a/docs/sources/setup/install/_index.md +++ b/docs/sources/setup/install/_index.md @@ -17,10 +17,6 @@ There are several methods of installing Loki and Promtail: - [Install and run locally]({{< relref "./local" >}}) - [Install from source]({{< relref "./install-from-source" >}}) -The [Sizing Tool]({{< relref "../size" >}}) can be used to determine the proper cluster sizing -given an expected ingestion rate and query performance. It targets the Helm -installation on Kubernetes. - ## General process In order to run Loki, you must: diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md index e2215c7e80f7..74dcb8e50496 100644 --- a/docs/sources/setup/size/_index.md +++ b/docs/sources/setup/size/_index.md @@ -6,7 +6,7 @@ aliases: - ../installation/sizing/ - ../installation/helm/generate weight: 100 -keywords: [] +draft: true --- From 56c5dc3b15cadff7dbc08045603e6a403a4de21a Mon Sep 17 00:00:00 2001 From: J Stickler Date: Mon, 15 Apr 2024 16:39:41 -0400 Subject: [PATCH 5/9] docs: clarify upgrade behavior (#12578) Co-authored-by: Christian Haudum --- docs/sources/operations/scalability.md | 2 +- docs/sources/release-notes/v2-6.md | 4 ++-- docs/sources/setup/upgrade/_index.md | 16 ++++++++++------ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md index e916e2bbdbe7..f7117ff10347 100644 --- a/docs/sources/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -17,7 +17,7 @@ and scaling for resource usage. 
The Query frontend has an in-memory queue that can be moved out into a separate process similar to the [Grafana Mimir query-scheduler](/docs/mimir/latest/operators-guide/architecture/components/query-scheduler/). This allows running multiple query frontends. -To run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes needs to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file]({{< relref "../configure/_index.md" >}}). +To run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes needs to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file](https://grafana.com/docs/loki//configure). It is not valid to start the querier with both a configured frontend and a scheduler address. diff --git a/docs/sources/release-notes/v2-6.md b/docs/sources/release-notes/v2-6.md index aeac78937f70..8c873fa9520b 100644 --- a/docs/sources/release-notes/v2-6.md +++ b/docs/sources/release-notes/v2-6.md @@ -13,7 +13,7 @@ Grafana Labs is excited to announce the release of Loki 2.6. Here's a summary of - **Query multiple tenants at once.** We've introduced cross-tenant query federation, which allows you to issue one query to multiple tenants and get a single, consolidated result. This is great for scenarios where you need a global view of logs within your multi-tenant cluster. For more information on how to enable this feature, see [Multi-Tenancy]({{< relref "../operations/multi-tenancy.md" >}}). - **Filter out and delete certain log lines from query results.** This is particularly useful in cases where users may accidentally write sensitive information to Loki that they do not want exposed. 
Users craft a LogQL query that selects the specific lines they're interested in, and then can choose to either filter out those lines from query results, or permanently delete them from Loki's storage. For more information, see [Logs Deletion]({{< relref "../operations/storage/logs-deletion.md" >}}). - **Improved query performance on instant queries.** Loki now splits instant queries with a large time range (for example, `sum(rate({app="foo"}[6h]))`) into several smaller sub-queries and executes them in parallel. Users don't need to take any action to enjoy this performance improvement; however, they can adjust the number of sub-queries generated by modifying the `split_queries_by_interval` configuration parameter, which currently defaults to `30m`. -- **Support Baidu AI Cloud as a storage backend.** Loki users can now use Baidu Object Storage (BOS) as their storage backend. See [bos_storage_config]({{< relref "../configure/_index.md#bos_storage_config" >}}) for details. +- **Support Baidu AI Cloud as a storage backend.** Loki users can now use Baidu Object Storage (BOS) as their storage backend. See [bos_storage_config](https://grafana.com/docs/loki//configure/) for details. For a full list of all changes, look at the [CHANGELOG](https://github.com/grafana/loki/blob/main/CHANGELOG.md). @@ -40,4 +40,4 @@ A summary of some of the more important fixes: - [PR 6152](https://github.com/grafana/loki/pull/6152) Fixed a scenario where live tailing of logs could cause unbounded ingester memory growth. - [PR 5685](https://github.com/grafana/loki/pull/5685) Fixed a bug in Loki's push request parser that allowed users to send arbitrary non-string data as a log line. We now test that the pushed values are valid strings and return an error if values are not valid strings. - [PR 5799](https://github.com/grafana/loki/pull/5799) Fixed incorrect deduplication logic for cases where multiple log entries with the same timestamp exist. 
-- [PR 5888](https://github.com/grafana/loki/pull/5888) Fixed a bug in the [common configuration]({{< relref "../configure/_index.md#common" >}}) where the `instance_interface_names` setting was getting overwritten by the default ring configuration. +- [PR 5888](https://github.com/grafana/loki/pull/5888) Fixed a bug in the [common configuration](https://grafana.com/docs/loki//configure/#common) where the `instance_interface_names` setting was getting overwritten by the default ring configuration. diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md index a877f141ddcf..e5abde43173d 100644 --- a/docs/sources/setup/upgrade/_index.md +++ b/docs/sources/setup/upgrade/_index.md @@ -104,6 +104,10 @@ If no label is found matching the list, a value of `unknown_service` is applied. You can change this list by providing a list of labels to `discover_service_name` in the [limits_config](/docs/loki//configure/#limits_config) block. +{{< admonition type="note" >}} +If you are already using a `service_label`, Loki will not make a new assignment. +{{< /admonition >}} + **You can disable this by providing an empty value for `discover_service_name`.** #### Removed `shared_store` and `shared_store_key_prefix` from shipper configuration @@ -171,7 +175,7 @@ The path prefix under which the delete requests are stored is decided by `-compa #### Configuration `async_cache_write_back_concurrency` and `async_cache_write_back_buffer_size` have been removed -These configurations were redundant with the `Background` configuration in the [cache-config]({{< relref "../../configure#cache_config" >}}). +These configurations were redundant with the `Background` configuration in the [cache-config](https://grafana.com/docs/loki//configure/#cache_config). 
`async_cache_write_back_concurrency` can be set with `writeback_goroutines` `async_cache_write_back_buffer_size` can be set with `writeback_buffer` @@ -277,7 +281,7 @@ The TSDB index type has support for caching results for 'stats' and 'volume' que All of these are cached to the `results_cache` which is configured in the `query_range` config section. By default, an in memory cache is used. #### Write dedupe cache is deprecated -Write dedupe cache is deprecated because it not required by the newer single store indexes ([TSDB]({{< relref "../../operations/storage/tsdb" >}}) and [boltdb-shipper]({{< relref "../../operations/storage/boltdb-shipper" >}})). +Write dedupe cache is deprecated because it not required by the newer single store indexes ([TSDB](https://grafana.com/docs/loki//operations/storage/tsdb/) and [boltdb-shipper](https://grafana.com/docs/loki//operations/storage/boltdb-shipper/)). If you using a [legacy index type](https://grafana.com/docs/loki//configure/storage/#index-storage), consider migrating to TSDB (recommended). #### Embedded cache metric changes @@ -761,7 +765,7 @@ This histogram reports the distribution of log line sizes by file. It has 8 buck This creates a lot of series and we don't think this metric has enough value to offset the amount of series genereated so we are removing it. -While this isn't a direct replacement, two metrics we find more useful are size and line counters configured via pipeline stages, an example of how to configure these metrics can be found in the [metrics pipeline stage docs]({{< relref "../../send-data/promtail/stages/metrics#counter" >}}). +While this isn't a direct replacement, two metrics we find more useful are size and line counters configured via pipeline stages, an example of how to configure these metrics can be found in the [metrics pipeline stage docs](https://grafana.com/docs/loki//send-data/promtail/stages/metrics/#counter). 
#### `added Docker target` log message has been demoted from level=error to level=info @@ -815,7 +819,7 @@ limits_config: retention_period: [30d] ``` -See the [retention docs]({{< relref "../../operations/storage/retention" >}}) for more info. +See the [retention docs](https://grafana.com/docs/loki//operations/storage/retention/) for more info. #### Log messages on startup: proto: duplicate proto type registered: @@ -1286,7 +1290,7 @@ If you happen to have `results_cache.max_freshness` set, use `limits_config.max_ ### Promtail config removed -The long deprecated `entry_parser` config in Promtail has been removed, use [pipeline_stages]({{< relref "../../send-data/promtail/configuration#pipeline_stages" >}}) instead. +The long deprecated `entry_parser` config in Promtail has been removed, use [pipeline_stages](https://grafana.com/docs/loki//send-data/promtail/configuration/#pipeline_stages) instead. ### Upgrading schema to use boltdb-shipper and/or v11 schema @@ -1616,7 +1620,7 @@ max_retries: Loki 1.4.0 vendors Cortex v0.7.0-rc.0 which contains [several breaking config changes](https://github.com/cortexproject/cortex/blob/v0.7.0-rc.0/CHANGELOG). -In the [cache_config]({{< relref "../../configure#cache_config" >}}), `defaul_validity` has changed to `default_validity`. +In the [cache_config](https://grafana.com/docs/loki//configure#cache_config), `defaul_validity` has changed to `default_validity`. If you configured your schema via arguments and not a config file, this is no longer supported. This is not something we had ever provided as an option via docs and is unlikely anyone is doing, but worth mentioning. 
From 397aa56e157cbf733da548474a4bcae773e82362 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Tue, 16 Apr 2024 10:57:39 +0200 Subject: [PATCH 6/9] feat: Enable log volume endpoint by default (#12628) --- docs/sources/shared/configuration.md | 3 ++- pkg/validation/limits.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index faa53b39d140..6c43486f67c1 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -3148,7 +3148,8 @@ The `limits_config` block configures global and per-tenant limits in Loki. The v [max_querier_bytes_read: | default = 150GB] # Enable log-volume endpoints. -[volume_enabled: ] +# CLI flag: -limits.volume-enabled +[volume_enabled: | default = true] # The maximum number of aggregated series in a log-volume response # CLI flag: -limits.volume-max-series diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 9e2e3a6042c8..1b82716ce732 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -385,6 +385,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { _ = l.MaxStructuredMetadataSize.Set(defaultMaxStructuredMetadataSize) f.Var(&l.MaxStructuredMetadataSize, "limits.max-structured-metadata-size", "Maximum size accepted for structured metadata per entry. Default: 64 kb. Any log line exceeding this limit will be discarded. There is no limit when unset or set to 0.") f.IntVar(&l.MaxStructuredMetadataEntriesCount, "limits.max-structured-metadata-entries-count", defaultMaxStructuredMetadataCount, "Maximum number of structured metadata entries per log line. Default: 128. Any log line exceeding this limit will be discarded. 
There is no limit when unset or set to 0.") + f.BoolVar(&l.VolumeEnabled, "limits.volume-enabled", true, "Enable log volume endpoint.") } // SetGlobalOTLPConfig set GlobalOTLPConfig which is used while unmarshaling per-tenant otlp config to use the default list of resource attributes picked as index labels. From 0831802a99243f9fe61f6cc8795739bf67e8d8e9 Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Tue, 16 Apr 2024 21:17:19 +0530 Subject: [PATCH 7/9] fix: fix setting of info log level when trying to detect level from log lines (#12635) --- pkg/distributor/distributor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 15ffdfdb9482..eae29a57c905 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -919,7 +919,7 @@ func extractLogLevelFromLogLine(log string) string { return logLevelDebug } if strings.Contains(log, `:"info"`) || strings.Contains(log, `:"INFO"`) { - return logLevelDebug + return logLevelInfo } } @@ -940,7 +940,7 @@ func extractLogLevelFromLogLine(log string) string { return logLevelDebug } if strings.Contains(log, "=info") || strings.Contains(log, "=INFO") { - return logLevelDebug + return logLevelInfo } } From 7f6f5a538bfd12f070b7783e65d2262fdc1de511 Mon Sep 17 00:00:00 2001 From: ND Tai <49815011+taind772@users.noreply.github.com> Date: Wed, 17 Apr 2024 01:24:29 +0700 Subject: [PATCH 8/9] docs: correct typo on api param (#12631) Co-authored-by: J Stickler --- docs/sources/reference/loki-http-api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/loki-http-api.md b/docs/sources/reference/loki-http-api.md index c23ae69f0687..81904e20138c 100644 --- a/docs/sources/reference/loki-http-api.md +++ b/docs/sources/reference/loki-http-api.md @@ -1040,7 +1040,7 @@ GET /config ``` `/config` exposes the current configuration. The optional `mode` query parameter can be used to -modify the output. 
If it has the value `diff` only the differences between the default configuration +modify the output. If it has the value `diffs` only the differences between the default configuration and the current are returned. A value of `defaults` returns the default configuration. In microservices mode, the `/config` endpoint is exposed by all components. From eaa06f8c70fdacc74dfa47163b9291369af2516c Mon Sep 17 00:00:00 2001 From: David Allen Date: Tue, 16 Apr 2024 14:58:05 -0400 Subject: [PATCH 9/9] docs: add play link, correct errors to visualization instructions (#12604) Co-authored-by: J Stickler --- docs/sources/visualize/grafana.md | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/docs/sources/visualize/grafana.md b/docs/sources/visualize/grafana.md index 3715ac62c8f8..9a1ba98c8fd7 100644 --- a/docs/sources/visualize/grafana.md +++ b/docs/sources/visualize/grafana.md @@ -13,17 +13,16 @@ keywords: --- # Visualize log data -[Grafana 6.0](/grafana/download/6.0.0) and more recent -versions have built-in support for Grafana Loki. -Use [Grafana 6.3](/grafana/download/6.3.0) or a more -recent version to take advantage of [LogQL]({{< relref "../query/_index.md" >}}) functionality. +Modern Grafana versions after 6.3 have built-in support for Grafana Loki and [LogQL](https://grafana.com/docs/loki//query/). + +## Using Explore 1. Log into your Grafana instance. If this is your first time running Grafana, the username and password are both defaulted to `admin`. -1. In Grafana, go to `Configuration` > `Data Sources` via the cog icon on the +1. In Grafana, go to `Connections` > `Data Sources` via the cog icon on the left sidebar. -1. Click the big + Add data source button. -1. Choose Loki from the list. +1. Click the big + Add a new data source button. +1. Search for, or choose Loki from the list. 1. The http URL field should be the address of your Loki server. 
For example, when running locally or with Docker using port mapping, the address is likely `http://localhost:3100`. When running with docker-compose or Kubernetes, the address is likely `http://loki:3100`. 1. To see the logs, click Explore on the sidebar, select the Loki datasource in the top-left dropdown, and then choose a log stream using the Log labels button. 1. Learn more about querying by reading about Loki's query language [LogQL]({{< relref "../query/_index.md" >}}). +If you would like to see an example of this live, you can try [Grafana Play's Explore feature](https://play.grafana.org/explore?schemaVersion=1&panes=%7B%22v1d%22:%7B%22datasource%22:%22ac4000ca-1959-45f5-aa45-2bd0898f7026%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22expr%22:%22%7Bagent%3D%5C%22promtail%5C%22%7D%20%7C%3D%20%60%60%22,%22queryType%22:%22range%22,%22datasource%22:%7B%22type%22:%22loki%22,%22uid%22:%22ac4000ca-1959-45f5-aa45-2bd0898f7026%22%7D,%22editorMode%22:%22builder%22%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D%7D&orgId=1) + Read more about Grafana's Explore feature in the [Grafana documentation](http://docs.grafana.org/features/explore) and on how to search and filter for logs with Loki. +## Using Grafana Dashboards + +Because Loki can be used as a built-in data source above, we can use LogQL queries based on that datasource +to build complex visualizations that persist on Grafana dashboards. + +{{< docs/play title="Loki Example Grafana Dashboard" url="https://play.grafana.org/d/T512JVH7z/" >}} + +Read more about how to build Grafana Dashboards in [build your first dashboard](https://grafana.com/docs/grafana/latest/getting-started/build-first-dashboard/) + To configure Loki as a data source via provisioning, see [Configuring Grafana via Provisioning](http://docs.grafana.org/features/datasources/loki/#configure-the-datasource-with-provisioning). Set the URL in the provisioning.