diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000..76edfa61
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1 @@
+{"image":"mcr.microsoft.com/devcontainers/universal:2"}
\ No newline at end of file
diff --git a/.editorconfig b/.editorconfig
index 88723dd9..5039a0f7 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -1,4 +1,19 @@
[*.cs]
-# IDE0008: Use explicit type
-csharp_style_var_elsewhere = true
+# HAA0101: Array allocation for params parameter
+dotnet_diagnostic.HAA0101.severity = none
+
+# HAA0601: Value type to reference type conversion causing boxing allocation
+dotnet_diagnostic.HAA0601.severity = none
+
+# HAA0603: Delegate allocation from a method group
+dotnet_diagnostic.HAA0603.severity = none
+
+# HAA0301: Closure Allocation Source
+dotnet_diagnostic.HAA0301.severity = none
+
+# HAA0302: Display class allocation to capture closure
+dotnet_diagnostic.HAA0302.severity = none
+
+# HAA0401: Possible allocation of reference type enumerator
+dotnet_diagnostic.HAA0401.severity = none
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..69ef038e
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: [bnayae]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/workflows/build-publish-v2.yml b/.github/workflows/build-publish-v2.yml
index 6cd8ee3c..0f0642de 100644
--- a/.github/workflows/build-publish-v2.yml
+++ b/.github/workflows/build-publish-v2.yml
@@ -26,18 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
-
- - name: Increment Version
- run: |
- perl -pi -e 's/([0-9]+)\.([0-9]+)\.([0-9]+)/"$1.$2.${\( $3+1 )}"/eg' Directory.Build.props
- - name: Commit changes
- uses: EndBug/add-and-commit@v7
- with:
- author_name: CI/CD
- author_email: bnaya@weknow.network.com
- message: 'Increment Version'
- add: 'Directory.Build.props'
+ - uses: actions/checkout@v2
- name: Setup .NET
uses: actions/setup-dotnet@v1
@@ -50,7 +39,7 @@ jobs:
- name: Build
run: dotnet build --configuration ${{ env.BUILD_CONFIG }} --no-restore
- name: Test
- run: dotnet test Tests/Weknow.EventSource.Backbone.UnitTests --configuration ${{ env.BUILD_CONFIG }} --no-restore --no-build --verbosity normal
+ run: dotnet test Tests/EventSourcing.Backbone.UnitTests --configuration ${{ env.BUILD_CONFIG }} --no-restore --no-build --verbosity normal
- name: Push generated package to GitHub registry
run: dotnet nuget push ./**/*.nupkg -k ${{ secrets.NUGET_PUBLISH }} -s https://api.nuget.org/v3/index.json --skip-duplicate
@@ -68,18 +57,6 @@ jobs:
with:
dotnet-version: ${{ env.DOTNET_VER }}
include-prerelease: ${{ env.INCLUDE_PRERELEASE }}
-
- #- name: Increment Version
- # run: |
- # perl -pi -e 's/([0-9]+)\.([0-9]+)\.([0-9]+)/"$1.$2.${\( $3+1 )}"/eg' Directory.Build.props
- # shell: bash
- #- name: Commit changes
- # uses: EndBug/add-and-commit@v7
- # with:
- # author_name: CI/CD
- # author_email: ${{ inputs.author-email }}
- # message: "Increment Version"
- # add: "Directory.Build.props"
- name: Restore dependencies
run: dotnet restore /property:Configuration=Gen
diff --git a/.github/workflows/prepare-nuget.yml b/.github/workflows/prepare-nuget.yml
index d09e0633..469d0955 100644
--- a/.github/workflows/prepare-nuget.yml
+++ b/.github/workflows/prepare-nuget.yml
@@ -7,8 +7,8 @@ name: Prepare
on:
push:
branches: [ main ]
- pull_request:
- branches: [ main ]
+ # pull_request:
+ # branches: [ main ]
workflow_dispatch:
inputs:
logLevel:
@@ -20,6 +20,6 @@ on:
jobs:
version_increment:
- uses: weknow-network/weknow-workflows/.github/workflows/dotnet-increment-version.yml@main
+ uses: bnayae/open-workflows/.github/workflows/dotnet-increment-version.yml@main
\ No newline at end of file
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/ConsumerChannelConstants.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/ConsumerChannelConstants.cs
new file mode 100644
index 00000000..3f1b0a7d
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/ConsumerChannelConstants.cs
@@ -0,0 +1,12 @@
+namespace EventSourcing.Backbone.Channels;
+
+/// <summary>
+/// Constants
+/// </summary>
+internal static class ConsumerChannelConstants
+{
+ /// <summary>
+ /// The name of redis consumer channel source
+ /// </summary>
+ public const string REDIS_CHANNEL_SOURCE = "evt-src-redis-consumer-channel";
+}
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/EventSourcing.Backbone.Channels.RedisConsumerProvider.csproj b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/EventSourcing.Backbone.Channels.RedisConsumerProvider.csproj
new file mode 100644
index 00000000..7eccb988
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/EventSourcing.Backbone.Channels.RedisConsumerProvider.csproj
@@ -0,0 +1,29 @@
+
+
+ README.md
+
+
+
+
+ True
+ \
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerBuilder.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerBuilder.cs
new file mode 100644
index 00000000..20572904
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerBuilder.cs
@@ -0,0 +1,234 @@
+using EventSourcing.Backbone.Building;
+using EventSourcing.Backbone.Channels.RedisProvider;
+
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+
+using StackExchange.Redis;
+
+namespace EventSourcing.Backbone;
+
+public static class RedisConsumerBuilder
+{
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="endpoint">The raw endpoint (not an environment variable).</param>
+ /// <param name="password">The password (not an environment variable).</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder Create(
+ string endpoint,
+ string? password = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(endpoint, password, configurationHook);
+ return configuration.CreateRedisConsumerBuilder();
+ }
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="configurationHook">The configuration hook.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder Create(
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(configurationHook);
+ return configuration.CreateRedisConsumerBuilder();
+ }
+
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="options">The redis configuration.</param>
+ /// <param name="setting">The setting.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder CreateRedisConsumerBuilder(
+ this ConfigurationOptions options,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ var stg = setting ?? RedisConsumerChannelSetting.Default;
+ var builder = ConsumerBuilder.Empty;
+ var channelBuilder = builder.UseChannel(LocalCreate);
+ return channelBuilder;
+
+ IConsumerChannelProvider LocalCreate(ILogger logger)
+ {
+ var channel = new RedisConsumerChannel(
+ logger,
+ options,
+ stg);
+ return channel;
+ }
+ }
+
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="setting">The setting.</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder CreateRedisConsumerBuilder(
+ this RedisConsumerChannelSetting setting,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(configurationHook);
+ return configuration.CreateRedisConsumerBuilder(setting);
+ }
+
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="setting">The setting.</param>
+ /// <param name="endpoint">The raw endpoint (not an environment variable).</param>
+ /// <param name="password">The password (not an environment variable).</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder CreateRedisConsumerBuilder(
+ this RedisConsumerChannelSetting setting,
+ string endpoint,
+ string? password = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(endpoint, password, configurationHook);
+ return configuration.CreateRedisConsumerBuilder(setting);
+ }
+
+
+ /// <summary>
+ /// Create REDIS consumer builder.
+ /// </summary>
+ /// <param name="credentialsKeys">The credentials keys.</param>
+ /// <param name="setting">The setting.</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder CreateRedisConsumerBuilder(
+ this RedisCredentialsEnvKeys credentialsKeys,
+ RedisConsumerChannelSetting? setting = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = credentialsKeys.CreateConfigurationOptions(configurationHook);
+ return configuration.CreateRedisConsumerBuilder(setting);
+ }
+
+ /// <summary>
+ /// Uses REDIS consumer channel.
+ /// </summary>
+ /// <param name="builder">The builder.</param>
+ /// <param name="setting">The setting.</param>
+ /// <param name="redisConfiguration">The redis configuration.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder UseRedisChannel(
+ this IConsumerBuilder builder,
+ RedisConsumerChannelSetting? setting = null,
+ ConfigurationOptions? redisConfiguration = null)
+ {
+ var stg = setting ?? RedisConsumerChannelSetting.Default;
+ var channelBuilder = builder.UseChannel(LocalCreate);
+ return channelBuilder;
+
+ IConsumerChannelProvider LocalCreate(ILogger logger)
+ {
+ var channel = new RedisConsumerChannel(
+ logger,
+ redisConfiguration,
+ stg);
+ return channel;
+ }
+ }
+
+ /// <summary>
+ /// Uses REDIS consumer channel.
+ /// </summary>
+ /// <param name="builder">The builder.</param>
+ /// <param name="credentialsKeys">Environment keys of the credentials</param>
+ /// <param name="setting">The setting.</param>
+ /// <returns></returns>
+ public static IConsumerStoreStrategyBuilder UseRedisChannel(
+ this IConsumerBuilder builder,
+ RedisCredentialsEnvKeys credentialsKeys,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ var channelBuilder = builder.UseChannel(LocalCreate);
+ return channelBuilder;
+
+ IConsumerChannelProvider LocalCreate(ILogger logger)
+ {
+ var channel = new RedisConsumerChannel(
+ logger,
+ credentialsKeys,
+ setting);
+ return channel;
+ }
+ }
+
+ /// <summary>
+ /// Uses REDIS consumer channel.
+ /// </summary>
+ /// <param name="builder">The builder.</param>
+ /// <param name="redisClientFactory">The redis client factory.</param>
+ /// <param name="setting">The setting.</param>
+ /// <returns></returns>
+ internal static IConsumerStoreStrategyBuilder UseRedisChannel(
+ this IConsumerBuilder builder,
+ IEventSourceRedisConnectionFactory redisClientFactory,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ var channelBuilder = builder.UseChannel(LocalCreate);
+ return channelBuilder;
+
+ IConsumerChannelProvider LocalCreate(ILogger logger)
+ {
+ var channel = new RedisConsumerChannel(
+ redisClientFactory,
+ logger,
+ setting);
+ return channel;
+ }
+ }
+
+ /// <summary>
+ /// Uses REDIS consumer channel.
+ /// </summary>
+ /// <param name="builder">The builder.</param>
+ /// <param name="serviceProvider">The service provider.</param>
+ /// <param name="setting">The setting.</param>
+ /// <returns></returns>
+ /// redisClient
+ public static IConsumerIocStoreStrategyBuilder ResolveRedisConsumerChannel(
+ this IConsumerBuilder builder,
+ IServiceProvider serviceProvider,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ var channelBuilder = builder.UseChannel(serviceProvider, LocalCreate);
+ return channelBuilder;
+
+ IConsumerChannelProvider LocalCreate(ILogger logger)
+ {
+ var connFactory = serviceProvider.GetService<IEventSourceRedisConnectionFactory>();
+ if (connFactory == null)
+ throw new RedisConnectionException(ConnectionFailureType.None, $"{nameof(IEventSourceRedisConnectionFactory)} is not registered, use services.{nameof(RedisDiExtensions.AddEventSourceRedisConnection)} in order to register it at Setup stage.");
+ var channel = new RedisConsumerChannel(
+ connFactory,
+ logger,
+ setting);
+ return channel;
+ }
+ }
+
+ /// <summary>
+ /// Uses REDIS consumer channel.
+ /// </summary>
+ /// <param name="serviceProvider">The service provider.</param>
+ /// <param name="setting">The setting.</param>
+ /// <returns></returns>
+ /// redisClient
+ public static IConsumerIocStoreStrategyBuilder ResolveRedisConsumerChannel(
+ this IServiceProvider serviceProvider,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ var result = ConsumerBuilder.Empty.ResolveRedisConsumerChannel(serviceProvider, setting);
+ return result;
+ }
+}
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs
new file mode 100644
index 00000000..4c30881f
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs
@@ -0,0 +1,1182 @@
+using System.Diagnostics;
+using System.Diagnostics.Metrics;
+using System.Net;
+using System.Runtime.CompilerServices;
+using System.Text.Json;
+
+using Bnaya.Extensions.Common.Disposables;
+
+using EventSourcing.Backbone.Building;
+using EventSourcing.Backbone.Channels.RedisProvider.Common;
+using EventSourcing.Backbone.Consumers;
+using EventSourcing.Backbone.Private;
+
+using Microsoft.Extensions.Logging;
+
+using StackExchange.Redis;
+
+using static System.Math;
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+using static EventSourcing.Backbone.Private.EventSourceTelemetry;
+
+// TODO: [bnaya 2021-07] MOVE TELEMETRY TO THE BASE CLASSES OF PRODUCER / CONSUME
+
+namespace EventSourcing.Backbone.Channels.RedisProvider;
+
+/// <summary>
+/// The redis consumer channel.
+/// </summary>
+internal class RedisConsumerChannel : IConsumerChannelProvider
+{
+ private static readonly Counter<int> StealCountCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.events-stealing", "count",
+ "Attempt to get stale events (messages) from another consumer (which is assumed to be malfunctioning)");
+ private static readonly Counter<int> StealAmountCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.events-stealing.messages", "count",
+ "Attempt to get stale events (messages) from another consumer (which is assumed to be malfunctioning)");
+ private static readonly Counter<int> ConcumeBatchCountCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.batch", "count",
+ "Count of non-empty consuming batches from the stream provider");
+ private static readonly Counter<int> ConcumeEventsCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.events", "count",
+ "Sum of total consuming events (messages) before process");
+ private static readonly Counter<int> ConcumeEventsOperationCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.events.operation", "count",
+ "Sum of total consuming events (messages) before process");
+ private static readonly Counter<int> ConcumeBatchFailureCounter = EMeter.CreateCounter<int>("evt-src.sys.consumer.batch.failure", "count",
+ "batch reading failure");
+
+ private const string BEGIN_OF_STREAM = "0000000000000";
+ /// <summary>
+ /// The read-by-identifier chunk size.
+ /// REDIS doesn't have an option to read an exact position (it reads after a given position, excluding the position itself),
+ /// therefore the read should start before the actual position.
+ /// </summary>
+ private const int READ_BY_ID_CHUNK_SIZE = 10;
+ /// <summary>
+ /// Receiver max iterations
+ /// </summary>
+ private const int READ_BY_ID_ITERATIONS = 1000 / READ_BY_ID_CHUNK_SIZE;
+
+ private readonly ILogger _logger;
+ private readonly RedisConsumerChannelSetting _setting;
+ private readonly IEventSourceRedisConnectionFactory _connFactory;
+ private readonly IConsumerStorageStrategy _defaultStorageStrategy;
+ private const string META_SLOT = "____";
+ private const int INIT_RELEASE_DELAY = 100;
+ private const int MAX_RELEASE_DELAY = 1000 * 30; // 30 seconds
+
+ #region Ctor
+
+ /// <summary>
+ /// Initializes a new instance.
+ /// </summary>
+ /// <param name="redisConnFactory">The redis provider promise.</param>
+ /// <param name="logger">The logger.</param>
+ /// <param name="setting">The setting.</param>
+ public RedisConsumerChannel(
+ IEventSourceRedisConnectionFactory redisConnFactory,
+ ILogger logger,
+ RedisConsumerChannelSetting? setting = null)
+ {
+ _logger = logger;
+ _connFactory = redisConnFactory;
+ _defaultStorageStrategy = new RedisHashStorageStrategy(redisConnFactory);
+ _setting = setting ?? RedisConsumerChannelSetting.Default;
+ }
+
+ /// <summary>
+ /// Initializes a new instance.
+ /// </summary>
+ /// <param name="logger">The logger.</param>
+ /// <param name="configuration">The configuration.</param>
+ /// <param name="setting">The setting.</param>
+ public RedisConsumerChannel(
+ ILogger logger,
+ ConfigurationOptions? configuration = null,
+ RedisConsumerChannelSetting? setting = null) : this(
+ EventSourceRedisConnectionFactory.Create(
+ logger,
+ configuration),
+ logger,
+ setting)
+ {
+ }
+
+ /// <summary>
+ /// Initializes a new instance.
+ /// </summary>
+ /// <param name="logger">The logger.</param>
+ /// <param name="credentialsKeys">Environment keys of the credentials</param>
+ /// <param name="setting">The setting.</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ public RedisConsumerChannel(
+ ILogger logger,
+ IRedisCredentials credentialsKeys,
+ RedisConsumerChannelSetting? setting = null,
+ Action<ConfigurationOptions>? configurationHook = null) : this(
+ EventSourceRedisConnectionFactory.Create(
+ credentialsKeys,
+ logger,
+ configurationHook),
+ logger,
+ setting)
+ {
+ }
+
+ /// <summary>
+ /// Initializes a new instance.
+ /// </summary>
+ /// <param name="logger">The logger.</param>
+ /// <param name="endpoint">The raw endpoint (not an environment variable).</param>
+ /// <param name="password">The password (not an environment variable).</param>
+ /// <param name="setting">The setting.</param>
+ /// <param name="configurationHook">The configuration hook.</param>
+ public RedisConsumerChannel(
+ ILogger logger,
+ string endpoint,
+ string? password = null,
+ RedisConsumerChannelSetting? setting = null,
+ Action<ConfigurationOptions>? configurationHook = null) : this(
+ EventSourceRedisConnectionFactory.Create(
+ logger,
+ endpoint,
+ password,
+ configurationHook),
+ logger,
+ setting)
+ {
+ }
+
+ #endregion // Ctor
+
+ #region SubscribeAsync
+
+ /// <summary>
+ /// Subscribe to the channel for specific metadata.
+ /// </summary>
+ /// <param name="plan">The consumer plan.</param>
+ /// <param name="func">The function.</param>
+ /// <param name="cancellationToken">The cancellation token.</param>
+ /// <returns>
+ /// When completed
+ /// </returns>
+ public async ValueTask SubscribeAsync(
+ IConsumerPlan plan,
+ Func<Announcement, IAck, ValueTask<bool>> func,
+ CancellationToken cancellationToken)
+ {
+ var joinCancellationSource = CancellationTokenSource.CreateLinkedTokenSource(plan.Cancellation, cancellationToken);
+ var joinCancellation = joinCancellationSource.Token;
+ ConsumerOptions options = plan.Options;
+
+ ILogger? logger = _logger ?? plan.Logger;
+ logger.LogInformation("REDIS EVENT-SOURCE | SUBSCRIBE key: [{key}], consumer-group: [{consumer-group}], consumer-name: [{consumer-name}]", plan.FullUri(), plan.ConsumerGroup, plan.ConsumerName);
+
+ while (!joinCancellation.IsCancellationRequested)
+ {
+ try
+ {
+ await SubsribeToSingleAsync(plan, func, options, joinCancellation);
+ // TODO: [bnaya 2023-05-22] think of the api for multi stream subscription (by partial uri * pattern) -> var keys = GetKeysUnsafeAsync(pattern: $"{partition}:*").WithCancellation(cancellationToken)
+
+ if (options.FetchUntilUnixDateOrEmpty != null)
+ break;
+ }
+ #region Exception Handling
+
+ catch (OperationCanceledException)
+ {
+ if (_logger == null)
+ Console.WriteLine($"Subscribe cancellation [{plan.FullUri()}] event stream (may have reach the messages limit)");
+ else
+ _logger.LogError("Subscribe cancellation [{uri}] event stream (may have reach the messages limit)",
+ plan.Uri);
+ joinCancellationSource.CancelSafe();
+ }
+ catch (Exception ex)
+ {
+ if (_logger == null)
+ Console.WriteLine($"Fail to subscribe into the [{plan.FullUri()}] event stream");
+ else
+ _logger.LogError(ex, "Fail to subscribe into the [{uri}] event stream",
+ plan.Uri);
+ throw;
+ }
+
+ #endregion // Exception Handling
+ }
+ }
+
+ #endregion // SubscribeAsync
+
+ #region SubsribeToSingleAsync
+
+ /// <summary>
+ /// Subscribe to specific shard.
+ /// </summary>
+ /// <param name="plan">The consumer plan.</param>
+ /// <param name="func">The function.</param>
+ /// <param name="options">The options.</param>
+ /// <param name="cancellationToken">The cancellation token.</param>
+ private async Task SubsribeToSingleAsync(
+ IConsumerPlan plan,
+ Func<Announcement, IAck, ValueTask<bool>> func,
+ ConsumerOptions options,
+ CancellationToken cancellationToken)
+ {
+ var claimingTrigger = options.ClaimingTrigger;
+ var minIdleTime = (int)options.ClaimingTrigger.MinIdleTime.TotalMilliseconds;
+
+ var env = plan.Environment.ToDash();
+ string uri = plan.Uri.ToDash();
+ string fullUri = plan.FullUri();
+ string consumerGroup = plan.ConsumerGroup.ToDash();
+
+ bool isFirstBatchOrFailure = true;
+
+ CommandFlags flags = CommandFlags.None;
+ string? fetchUntil = options.FetchUntilUnixDateOrEmpty?.ToString();
+
+ ILogger logger = plan.Logger ?? _logger;
+
+ #region await CreateConsumerGroupIfNotExistsAsync(...)
+
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ RedisChannelConstants.NONE_CONSUMER,
+ logger,
+ cancellationToken);
+
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ plan.ConsumerGroup,
+ logger,
+ cancellationToken);
+
+ #endregion // await CreateConsumerGroupIfNotExistsAsync(...)
+
+ int releaseDelay = INIT_RELEASE_DELAY;
+ int bachSize = options.BatchSize;
+
+ TimeSpan delay = TimeSpan.Zero;
+ int emptyBatchCount = 0;
+ //using (ETracer.StartActivity("consumer.loop", ActivityKind.Server))
+ //{
+ while (!cancellationToken.IsCancellationRequested)
+ {
+ var proceed = await HandleBatchAsync();
+ if (!proceed)
+ break;
+ }
+ //}
+
+ #region HandleBatchAsync
+
+ // Handle single batch
+ async ValueTask<bool> HandleBatchAsync()
+ {
+ var policy = _setting.Policy.Policy;
+ return await policy.ExecuteAsync(HandleBatchBreakerAsync, cancellationToken);
+ }
+
+ #endregion // HandleBatchAsync
+
+ #region HandleBatchBreakerAsync
+
+ async Task<bool> HandleBatchBreakerAsync(CancellationToken ct)
+ {
+ ct.ThrowIfCancellationRequested();
+
+ StreamEntry[] results = await ReadBatchAsync();
+ emptyBatchCount = results.Length == 0 ? emptyBatchCount + 1 : 0;
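+ // track consecutive empty batches; once the claiming trigger's threshold is reached, try to claim stale messages from other consumers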
+ results = await ClaimStaleMessages(emptyBatchCount, results, ct);
+
+ if (results.Length == 0)
+ {
+ if (fetchUntil == null)
+ delay = await DelayIfEmpty(delay, cancellationToken);
+ return fetchUntil == null;
+ }
+
+ ct.ThrowIfCancellationRequested();
+
+ try
+ {
+ var batchCancellation = new CancellationTokenSource();
+ int i = 0;
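+ // 'i' is captured by the cancellation callback below so that, on cancel, only the not-yet-processed entries are released back to the group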
+ batchCancellation.Token.Register(async () =>
+ {
+ // TODO: [bnaya 2023-06-19 #RELEASE] committed id should be captured
+ RedisValue[] freeTargets = results[i..].Select(m => m.Id).ToArray();
+ await ReleaseAsync(freeTargets);
+ });
+ // TODO: [bnaya 2023-06-19] enable parallel consuming (when order doesn't matters) See #RELEASE
+ for (; i < results.Length && !batchCancellation.IsCancellationRequested; i++)
+ {
+ StreamEntry result = results[i];
+ #region Metadata meta = ...
+
+ Dictionary<RedisValue, RedisValue> channelMeta = result.Values.ToDictionary(m => m.Name, m => m.Value);
+
+ Metadata meta;
+ string? metaJson = channelMeta[META_SLOT];
+ string eventKey = ((string?)result.Id) ?? throw new ArgumentException(nameof(MetadataExtensions.Empty.EventKey));
+ if (string.IsNullOrEmpty(metaJson))
+ { // backward compatibility
+
+ string channelType = ((string?)channelMeta[nameof(MetadataExtensions.Empty.ChannelType)]) ?? throw new EventSourcingException(nameof(MetadataExtensions.Empty.ChannelType));
+
+ if (channelType != CHANNEL_TYPE)
+ {
+ // TODO: [bnaya 2021-07] send metrics
+ logger.LogWarning($"{nameof(RedisConsumerChannel)} [{CHANNEL_TYPE}] omit handling message of type '{channelType}'");
+ await AckAsync(result.Id);
+ continue;
+ }
+
+ string id = ((string?)channelMeta[nameof(MetadataExtensions.Empty.MessageId)]) ?? throw new EventSourcingException(nameof(MetadataExtensions.Empty.MessageId));
+ string operation = ((string?)channelMeta[nameof(MetadataExtensions.Empty.Operation)]) ?? throw new EventSourcingException(nameof(MetadataExtensions.Empty.Operation));
+ long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
+ DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
+ if (fetchUntil != null && string.Compare(fetchUntil, result.Id) < 0)
+ return false;
+ meta = new Metadata
+ {
+ MessageId = id,
+ EventKey = eventKey,
+ Environment = plan.Environment,
+ Uri = plan.Uri,
+ Operation = operation,
+ ProducedAt = producedAt
+ };
+
+ }
+ else
+ {
+ meta = JsonSerializer.Deserialize<Metadata>(metaJson, EventSourceOptions.FullSerializerOptions) ?? throw new EventSourcingException(nameof(Metadata));
+ meta = meta with { EventKey = eventKey };
+
+ }
+
+ #endregion // Metadata meta = ...
+
+ ActivityContext parentContext = EventSourceTelemetryExtensions.ExtractSpan(channelMeta, ExtractTraceContext);
+ using var activity = ETracer.StartConsumerTrace(meta, parentContext);
+
+ #region IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
+
+ IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
+ {
+ try
+ {
+ if (entries.TryGetValue(key, out var value))
+ {
+ if (string.IsNullOrEmpty(value))
+ return Array.Empty<string>();
+ return new[] { value.ToString() };
+ }
+ }
+ #region Exception Handling
+
+ catch (Exception ex)
+ {
+ Exception err = ex.FormatLazy();
+ _logger.LogError(err, "Failed to extract trace context: {error}", err);
+ }
+
+ #endregion // Exception Handling
+
+ return Enumerable.Empty<string>();
+ }
+
+ #endregion // IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
+
+ int local = i;
+ var cancellableIds = results[local..].Select(m => m.Id);
+ var ack = new AckOnce(
+ fullUri,
+ async (cause) =>
+ {
+ Activity.Current?.AddEvent("consumer.event.ack",
+ t => PrepareTrace(t).Add("cause", cause));
+ await AckAsync(result.Id);
+ },
+ plan.Options.AckBehavior, logger,
+ async (cause) =>
+ {
+ Activity.Current?.AddEvent("consumer.event.cancel",
+ t => PrepareTrace(t).Add("cause", cause));
+ batchCancellation.CancelSafe(); // cancel forward
+ await CancelAsync(cancellableIds);
+ });
+
+ #region OriginFilter
+
+ MessageOrigin originFilter = plan.Options.OriginFilter;
+ if (originFilter != MessageOrigin.None && (originFilter & meta.Origin) == MessageOrigin.None)
+ {
+ Ack.Set(ack);
+ #region Log
+
+ _logger.LogInformation("Event Source skip consuming of event [{event-key}] because if origin is [{origin}] while the origin filter is sets to [{origin-filter}], Operation:[{operation}], Stream:[{stream}]", meta.EventKey, meta.Origin, originFilter, meta.Operation, meta.FullUri());
+
+ #endregion // Log
+ continue;
+ }
+
+ #endregion // OriginFilter
+
+ #region var announcement = new Announcement(...)
+
+ Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
+ Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
+
+ var announcement = new Announcement
+ {
+ Metadata = meta,
+ Segments = segmets,
+ InterceptorsData = interceptions
+ };
+
+ #endregion // var announcement = new Announcement(...)
+
+ bool succeed;
+ ConcumeEventsOperationCounter.WithEnvUriOperation(meta).Add(1);
+ Activity? execActivity = null;
+ if (ETracer.HasListeners())
+ execActivity = ETracer.StartInternalTrace($"{env}.{uri}.{meta.Operation.ToDash()}.invoke");
+ using (execActivity)
+ {
+ succeed = await func(announcement, ack);
+ execActivity?.SetTag("succeed", succeed);
+ }
+ if (succeed)
+ {
+ releaseDelay = INIT_RELEASE_DELAY;
+ bachSize = options.BatchSize;
+ }
+ else
+ {
+ // TODO: [bnaya 2023-06-19 #RELEASE] committed id should be captured
+ if (options.PartialBehavior == Enums.PartialConsumerBehavior.Sequential)
+ {
+ using (ETracer.StartInternalTrace("consumer.release-events-on-failure"))
+ {
+ RedisValue[] freeTargets = results[i..].Select(m => m.Id).ToArray();
+ await ReleaseAsync(freeTargets); // release the rest of the batch which doesn't processed yet
+ }
+ }
+ }
+ }
+ }
+ catch
+ {
+ isFirstBatchOrFailure = true;
+ }
+ return true;
+ }
+
+ #endregion // HandleBatchBreakerAsync
+
+ #region ReadBatchAsync
+
+ // read batch entities from REDIS
+ async Task<StreamEntry[]> ReadBatchAsync()
+ {
+ // TBD: circuit-breaker
+ try
+ {
+ var r = await _setting.Policy.Policy.ExecuteAsync(async (ct) =>
+ {
+ ct.ThrowIfCancellationRequested();
+ StreamEntry[] values = Array.Empty<StreamEntry>();
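+ // first re-read this consumer's own pending (un-acked) messages (crash recovery), then fall back to reading new messages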
+ values = await ReadSelfPending();
+
+ if (values.Length == 0)
+ {
+ isFirstBatchOrFailure = false;
+ string group = plan.ConsumerGroup;
+ using var activity = ETracer.StartInternalTrace("consumer.read-batch",
+ t => PrepareTrace(t)
+ .Add("consumer-group", group));
+
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ try
+ {
+ values = await db.StreamReadGroupAsync(
+ fullUri,
+ group,
+ plan.ConsumerName,
+ position: StreamPosition.NewMessages,
+ count: bachSize,
+ flags: flags)
+ .WithCancellation(ct, () => Array.Empty<StreamEntry>())
+ .WithCancellation(cancellationToken, () => Array.Empty<StreamEntry>());
+ PrepareMeter(ConcumeBatchCountCounter).Add(1);
+ activity?.SetTag("count", values.Length);
+ PrepareMeter(ConcumeEventsCounter).Add(values.Length);
+ }
+ #region Exception Handling
+
+ catch (RedisServerException ex) when (ex.Message.StartsWith("NOGROUP"))
+ {
+ PrepareMeter(ConcumeBatchFailureCounter).Add(1);
+ logger.LogWarning(ex, ex.Message);
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ plan.ConsumerGroup,
+ logger, cancellationToken);
+ }
+ catch (RedisServerException ex)
+ {
+ PrepareMeter(ConcumeBatchFailureCounter).Add(1);
+ logger.LogWarning(ex, ex.Message);
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ plan.ConsumerGroup,
+ logger, cancellationToken);
+ }
+
+ #endregion // Exception Handling
+ }
+ StreamEntry[] results = values ?? Array.Empty<StreamEntry>();
+ return results;
+ }, cancellationToken);
+ return r;
+ }
+ #region Exception Handling
+
+ catch (RedisTimeoutException ex)
+ {
+ logger.LogWarning(ex, "Event source [{source}] by [{consumer}]: Timeout", fullUri, plan.ConsumerName);
+ return Array.Empty<StreamEntry>();
+ }
+ catch (Exception ex)
+ {
+ logger.LogError(ex, "Fail to read from event source [{source}] by [{consumer}]", fullUri, plan.ConsumerName);
+ return Array.Empty<StreamEntry>();
+ }
+
+ #endregion // Exception Handling
+ }
+
+ #endregion // ReadBatchAsync
+
+ #region ReadSelfPending
+
+ // Check for pending messages of the current consumer (crash scenario)
+ async Task<StreamEntry[]> ReadSelfPending()
+ {
+
+ StreamEntry[] values = Array.Empty<StreamEntry>();
+ if (!isFirstBatchOrFailure)
+ return values;
+
+ using var _ = ETracer.StartInternalTrace("consumer.self-pending");
+
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ try
+ {
+ StreamPendingMessageInfo[] pendMsgInfo = await db.StreamPendingMessagesAsync(
+ fullUri,
+ plan.ConsumerGroup,
+ options.BatchSize,
+ plan.ConsumerName,
+ flags: CommandFlags.DemandMaster);
+ if (pendMsgInfo != null && pendMsgInfo.Length != 0)
+ {
+ var ids = pendMsgInfo
+ .Select(m => m.MessageId).ToArray();
+ if (ids.Length != 0)
+ {
+ values = await db.StreamClaimAsync(fullUri,
+ plan.ConsumerGroup,
+ plan.ConsumerName,
+ 0,
+ ids,
+ flags: CommandFlags.DemandMaster);
+ values = values ?? Array.Empty<StreamEntry>();
+ _logger.LogInformation("Claimed messages: {ids}", ids);
+ }
+ }
+
+ return values;
+ }
+ #region Exception Handling
+
+ catch (RedisServerException ex) when (ex.Message.StartsWith("NOGROUP"))
+ {
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ plan.ConsumerGroup,
+ logger, cancellationToken);
+ return Array.Empty<StreamEntry>();
+ }
+
+ #endregion // Exception Handling
+ }
+
+ #endregion // ReadSelfPending
+
+ #region ClaimStaleMessages
+
+ // Taking work from other consumers which have long-pending messages
+ async Task<StreamEntry[]> ClaimStaleMessages(
+ int emptyBatchCount,
+ StreamEntry[] values,
+ CancellationToken ct)
+ {
+ var logger = plan.Logger ?? _logger;
+ ct.ThrowIfCancellationRequested();
+ if (values.Length != 0) return values;
+ if (emptyBatchCount < claimingTrigger.EmptyBatchCount)
+ return values;
+ using var _ = ETracer.StartInternalTrace("consumer.stale-events");
+ try
+ {
+ IDatabaseAsync db = await _connFactory.GetDatabaseAsync(ct);
+ StreamPendingInfo pendingInfo;
+ using (ETracer.StartInternalTrace("consumer.events-stealing.pending"))
+ {
+ pendingInfo = await db.StreamPendingAsync(fullUri, plan.ConsumerGroup, flags: CommandFlags.DemandMaster);
+ }
+ PrepareMeter(StealCountCounter).Add(1);
+ foreach (var c in pendingInfo.Consumers)
+ {
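+ // skip this consumer's own pending messages; only other consumers' stale messages are candidates for claiming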
+ var self = c.Name == plan.ConsumerName;
+ if (self) continue;
+ try
+ {
+ StreamPendingMessageInfo[] pendMsgInfo;
+ using (ETracer.StartInternalTrace("consumer.events-stealing.pending-events",
+ t => PrepareTrace(t).Add("from-consumer", c.Name)))
+ {
+ pendMsgInfo = await db.StreamPendingMessagesAsync(
+ fullUri,
+ plan.ConsumerGroup,
+ 10,
+ c.Name,
+ pendingInfo.LowestPendingMessageId,
+ pendingInfo.HighestPendingMessageId,
+ flags: CommandFlags.DemandMaster);
+ }
+
+ RedisValue[] ids = pendMsgInfo
+ .Where(x => x.IdleTimeInMilliseconds > minIdleTime)
+ .Select(m => m.MessageId).ToArray();
+ if (ids.Length == 0)
+ continue;
+
+ #region Log
+ logger.LogInformation("Event Source Consumer [{name}]: Claimed {count} events, from Consumer [{name}]", plan.ConsumerName, c.PendingMessageCount, c.Name);
+
+ #endregion // Log
+
+ int count = ids.Length;
+ PrepareMeter(StealAmountCounter).WithTag("from-consumer", c.Name)
+ .Add(count);
+ // will claim events only if older than _setting.ClaimingTrigger.MinIdleTime
+ using (ETracer.StartInternalTrace("consumer.events-stealing.claim",
+ t => PrepareTrace(t)
+ .Add("from-consumer", c.Name)
+ .Add("message-count", count)))
+ {
+ values = await db.StreamClaimAsync(fullUri,
+ plan.ConsumerGroup,
+ c.Name,
+ minIdleTime,
+ ids,
+ flags: CommandFlags.DemandMaster);
+ }
+ if (values.Length != 0)
+ logger.LogInformation("Event Source Consumer [{name}]: Claimed {count} messages, from Consumer [{name}]", plan.ConsumerName, c.PendingMessageCount, c.Name);
+ }
+ #region Exception Handling
+
+ catch (RedisTimeoutException ex)
+ {
+ logger.LogWarning(ex, "Timeout (handle pending): {name}{self}", c.Name, self);
+ continue;
+ }
+
+ catch (Exception ex)
+ {
+ logger.LogError(ex, "Fail to claim pending: {name}{self}", c.Name, self);
+ }
+
+ #endregion // Exception Handling
+
+ if (values != null && values.Length != 0)
+ return values;
+ }
+ }
+ #region Exception Handling
+
+ catch (RedisConnectionException ex)
+ {
+ _logger.LogWarning(ex, "Fail to claim REDIS's pending");
+ }
+
+ catch (Exception ex)
+ {
+ _logger.LogError(ex, "Fail to claim pending");
+ }
+
+ #endregion // Exception Handling
+
+ return Array.Empty<StreamEntry>();
+ }
+
+ #endregion // ClaimStaleMessages
+
+ #region AckAsync
+
+ // Acknowledge event handling (prevent re-consuming of the message).
+ async ValueTask AckAsync(RedisValue messageId)
+ {
+ try
+ {
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ // release the event (won't handle again in the future)
+ await db.StreamAcknowledgeAsync(fullUri,
+ plan.ConsumerGroup,
+ messageId,
+ flags: CommandFlags.DemandMaster);
+ }
+ catch (Exception ex)
+ {
+ // TODO: [bnaya 2020-10] do better handling (re-throw / swallow + reason) currently logged at the wrapping class
+ logger.LogWarning(ex.FormatLazy(), $"Fail to acknowledge message [{messageId}]");
+ throw;
+ }
+ }
+
+ #endregion // AckAsync
+
+ #region CancelAsync
+
+ // Cancels the remaining (not yet processed) messages; effectively a no-op, see the note below.
+ ValueTask CancelAsync(IEnumerable<RedisValue> messageIds)
+ {
+ // no way to release consumed item back to the stream
+ //try
+ //{
+ // // release the event (won't handle again in the future)
+ // await db.StreamClaimIdsOnlyAsync(key,
+ // plan.ConsumerGroup,
+ // RedisValue.Null,
+ // 0,
+ // messageIds.ToArray(),
+ // flags: CommandFlags.DemandMaster);
+ //}
+ //catch (Exception)
+ //{ // TODO: [bnaya 2020-10] do better handling (re-throw / swallow + reason) currently logged at the wrapping class
+ // throw;
+ //}
+ return ValueTask.CompletedTask;
+
+ }
+
+ #endregion // CancelAsync
+
+ #region ReleaseAsync
+
+
+ // Releases the messages (work around).
+ async Task ReleaseAsync(RedisValue[] freeTargets)
+ {
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ try
+ {
+ using (ETracer.StartInternalTrace("consumer.release-ownership",
+ t => PrepareTrace(t).Add("consumer-group", plan.ConsumerGroup)))
+ {
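+ // hand ownership over to a placeholder consumer so that any consumer in the group can re-claim these events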
+ await db.StreamClaimAsync(fullUri,
+ plan.ConsumerGroup,
+ RedisChannelConstants.NONE_CONSUMER,
+ 1,
+ freeTargets,
+ flags: CommandFlags.DemandMaster);
+ }
+ using (ETracer.StartInternalTrace("consumer.release.delay",
+ t => PrepareTrace(t).Add("delay", releaseDelay)))
+ {
+ // give other potential consumers a chance of getting ownership
+ await Task.Delay(releaseDelay, cancellationToken);
+ }
+ if (releaseDelay < MAX_RELEASE_DELAY)
+ releaseDelay = Math.Min(releaseDelay * 2, MAX_RELEASE_DELAY);
+
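+ // after a release, shrink the batch size to 1, then grow it back gradually toward the configured batch size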
+ if (bachSize == options.BatchSize)
+ bachSize = 1;
+ else
+ bachSize = Math.Min(bachSize * 2, options.BatchSize);
+ }
+ #region Exception Handling
+
+ catch (RedisServerException ex) when (ex.Message.StartsWith("NOGROUP"))
+ {
+ await _connFactory.CreateConsumerGroupIfNotExistsAsync(
+ plan,
+ plan.ConsumerGroup,
+ logger, cancellationToken);
+ }
+
+ #endregion // Exception Handling
+ }
+
+ #endregion // ReleaseAsync
+
+ ITagAddition PrepareTrace(ITagAddition t) => t.Add("uri", uri).Add("env", env)
+ .Add("group-name", consumerGroup);
+ ICounterBuilder<int> PrepareMeter(Counter<int> t) => t.WithTag("uri", uri)
+ .WithTag("env", env)
+ .WithTag("group-name", consumerGroup);
+ }
+
+ #endregion // SubsribeToSingleAsync
+
+ #region GetByIdAsync
+
+ /// <summary>
+ /// Gets announcement data by id.
+ /// </summary>
+ /// <param name="entryId">The entry identifier.</param>
+ /// <param name="plan">The plan.</param>
+ /// <param name="cancellationToken">The cancellation token.</param>
+ /// <returns></returns>
+ async ValueTask<Announcement> IConsumerChannelProvider.GetByIdAsync(
+ EventKey entryId,
+ IConsumerPlan plan,
+ CancellationToken cancellationToken)
+ {
+ string mtdName = $"{nameof(IConsumerChannelProvider)}.{nameof(IConsumerChannelProvider.GetByIdAsync)}";
+
+ try
+ {
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ StreamEntry entry = await FindAsync(entryId);
+
+ #region var announcement = new Announcement(...)
+
+ Dictionary<RedisValue, RedisValue> channelMeta = entry.Values.ToDictionary(m => m.Name, m => m.Value);
+ string channelType = GetMeta(nameof(MetadataExtensions.Empty.ChannelType));
+ string id = GetMeta(nameof(MetadataExtensions.Empty.MessageId));
+ string operation = GetMeta(nameof(MetadataExtensions.Empty.Operation));
+ long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
+
+ #region string GetMeta(string propKey)
+
+ string GetMeta(string propKey)
+ {
+ string? result = channelMeta[propKey];
+ if (result == null) throw new ArgumentNullException(propKey);
+ return result;
+ }
+
+ #endregion // string GetMeta(string propKey)
+
+ DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
+#pragma warning disable CS8601 // Possible null reference assignment.
+ var meta = new Metadata
+ {
+ MessageId = id,
+ EventKey = entry.Id,
+ Environment = plan.Environment,
+ Uri = plan.Uri,
+ Operation = operation,
+ ProducedAt = producedAt,
+ ChannelType = channelType
+ };
+#pragma warning restore CS8601 // Possible null reference assignment.
+
+ Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
+ Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
+
+ var announcement = new Announcement
+ {
+ Metadata = meta,
+ Segments = segmets,
+ InterceptorsData = interceptions
+ };
+
+ #endregion // var announcement = new Announcement(...)
+
+ return announcement;
+
+ #region FindAsync
+
+ async Task<StreamEntry> FindAsync(EventKey entryId)
+ {
+ string lookForId = (string)entryId;
+ string fullUri = plan.FullUri();
+
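+ // REDIS streams read after a given position, so start one tick before the target id's time prefix and scan forward in chunks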
+ string originId = lookForId;
+ int len = originId.IndexOf('-');
+ string fromPrefix = originId.Substring(0, len);
+ long start = long.Parse(fromPrefix);
+ string startPosition = (start - 1).ToString();
+ int iteration = 0;
+ for (int i = 0; i < READ_BY_ID_ITERATIONS; i++) // up to 1000 items
+ {
+ iteration++;
+ StreamEntry[] entries = await db.StreamReadAsync(
+ fullUri,
+ startPosition,
+ READ_BY_ID_CHUNK_SIZE,
+ CommandFlags.DemandMaster);
+ if (entries.Length == 0)
+ throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{fullUri}] return nothing, start at ({startPosition}, iteration = {iteration}).");
+ string k = string.Empty;
+ foreach (StreamEntry e in entries)
+ {
+#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
+#pragma warning disable CS8602 // Dereference of a possibly null reference.
+ k = e.Id;
+ string ePrefix = k.Substring(0, len);
+#pragma warning restore CS8602 // Dereference of a possibly null reference.
+#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
+ long comp = long.Parse(ePrefix);
+ if (comp < start)
+ continue; // not there yet
+ if (k == lookForId)
+ {
+ return e;
+ }
+ if (ePrefix != fromPrefix)
+ throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{fullUri}] return not exists.");
+ }
+ startPosition = k; // next batch will start from last entry
+ }
+ throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{fullUri}] return not found.");
+ }
+
+ #endregion // FindAsync
+ }
+ #region Exception Handling
+
+ catch (Exception ex)
+ {
+ string key = plan.FullUri();
+ _logger.LogError(ex.FormatLazy(), "{method} Failed: Entry [{entryId}] from [{key}] event stream",
+ mtdName, entryId, key);
+ throw;
+ }
+
+ #endregion // Exception Handling
+ }
+
+ #endregion // GetByIdAsync
+
+ #region GetAsyncEnumerable
+
+ /// <summary>
+ /// Gets asynchronous enumerable of announcements.
+ /// </summary>
+ /// <param name="plan">The plan.</param>
+ /// <param name="options">The options.</param>
+ /// <param name="cancellationToken">The cancellation token.</param>
+ /// <returns></returns>
+ async IAsyncEnumerable<Announcement> IConsumerChannelProvider.GetAsyncEnumerable(
+ IConsumerPlan plan,
+ ConsumerAsyncEnumerableOptions? options,
+ [EnumeratorCancellation] CancellationToken cancellationToken)
+ {
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ var loop = AsyncLoop().WithCancellation(cancellationToken);
+ await foreach (StreamEntry entry in loop)
+ {
+ if (cancellationToken.IsCancellationRequested) yield break;
+
+ #region var announcement = new Announcement(...)
+
+ Dictionary<RedisValue, RedisValue> channelMeta = entry.Values.ToDictionary(m => m.Name, m => m.Value);
+#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
+#pragma warning disable CS8601 // Possible null reference assignment.
+ string id = channelMeta[nameof(MetadataExtensions.Empty.MessageId)];
+ string operation = channelMeta[nameof(MetadataExtensions.Empty.Operation)];
+ long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
+ DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
+ var meta = new Metadata
+ {
+ MessageId = id,
+ EventKey = entry.Id,
+ Environment = plan.Environment,
+ Uri = plan.Uri,
+ Operation = operation,
+ ProducedAt = producedAt
+ };
+#pragma warning restore CS8601 // Possible null reference assignment.
+#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
+ var filter = options?.OperationFilter;
+ if (filter != null && !filter(meta))
+ continue;
+
+ Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
+ Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
+
+ var announcement = new Announcement
+ {
+ Metadata = meta,
+ Segments = segmets,
+ InterceptorsData = interceptions
+ };
+
+ #endregion // var announcement = new Announcement(...)
+
+ yield return announcement;
+ }
+
+ #region AsyncLoop
+
+ async IAsyncEnumerable<StreamEntry> AsyncLoop()
+ {
+ string fullUri = plan.FullUri();
+
+ int iteration = 0;
+ RedisValue startPosition = options?.From ?? BEGIN_OF_STREAM;
+ TimeSpan delay = TimeSpan.Zero;
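+ // page through the stream in chunks; stop on cancellation, on an empty read (unless ExitWhenEmpty is false), or once past the 'To' position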
+ while (true)
+ {
+ if (cancellationToken.IsCancellationRequested) yield break;
+
+ iteration++;
+ StreamEntry[] entries = await db.StreamReadAsync(
+ fullUri,
+ startPosition,
+ READ_BY_ID_CHUNK_SIZE,
+ CommandFlags.DemandMaster);
+ if (entries.Length == 0)
+ {
+ if (options?.ExitWhenEmpty ?? true) yield break;
+ delay = await DelayIfEmpty(delay, cancellationToken);
+ continue;
+ }
+ string k = string.Empty;
+ foreach (StreamEntry e in entries)
+ {
+ if (cancellationToken.IsCancellationRequested) yield break;
+
+#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
+ k = e.Id;
+#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
+ if (options?.To != null && string.Compare(options?.To, k) < 0)
+ yield break;
+ yield return e;
+ }
+ startPosition = k; // next batch will start from last entry
+ }
+ }
+
+ #endregion // AsyncLoop
+ }
+
+ #endregion // GetAsyncEnumerable
+
+ #region ValueTask GetBucketAsync(StorageType storageType) // local function
+
+ /// <summary>
+ /// Gets a data bucket.
+ /// </summary>
+ /// <param name="plan">The plan.</param>
+ /// <param name="channelMeta">The channel meta.</param>
+ /// <param name="meta">The meta.</param>
+ /// <param name="storageType">Type of the storage.</param>
+ /// <returns></returns>
+ private async ValueTask<Bucket> GetBucketAsync(
+ IConsumerPlan plan,
+ Dictionary<RedisValue, RedisValue> channelMeta,
+ Metadata meta,
+ EventBucketCategories storageType)
+ {
+
+ IEnumerable<IConsumerStorageStrategy> strategies = await plan.StorageStrategiesAsync;
+ strategies = strategies.Where(m => m.IsOfTargetType(storageType));
+ Bucket bucket = Bucket.Empty;
+ if (strategies.Any())
+ {
+ foreach (var strategy in strategies)
+ {
+ using (ETracer.StartInternalTrace($"consumer.{strategy.Name}-storage.{storageType}.get"))
+ {
+ bucket = await strategy.LoadBucketAsync(meta, bucket, storageType, LocalGetProperty);
+ }
+ }
+ }
+ else
+ {
+ using (ETracer.StartInternalTrace($"consumer.{_defaultStorageStrategy.Name}-storage.{storageType}.get"))
+ {
+ bucket = await _defaultStorageStrategy.LoadBucketAsync(meta, bucket, storageType, LocalGetProperty);
+ }
+ }
+
+ return bucket;
+
+#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
+#pragma warning disable CS8603 // Possible null reference return.
+ string LocalGetProperty(string k) => (string)channelMeta[k];
+#pragma warning restore CS8603 // Possible null reference return.
+#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
+ }
+
+ #endregion // ValueTask StoreBucketAsync(StorageType storageType) // local function
+
+ #region DelayIfEmpty
+
+ // avoiding system hit when empty (mitigation of self DDoS)
+ private async Task DelayIfEmpty(TimeSpan previousDelay, CancellationToken cancellationToken)
+ {
+ var cfg = _setting.DelayWhenEmptyBehavior;
+ var newDelay = cfg.CalcNextDelay(previousDelay, cfg);
+ var limitDelay = Min(cfg.MaxDelay.TotalMilliseconds, newDelay.TotalMilliseconds);
+ newDelay = TimeSpan.FromMilliseconds(limitDelay);
+ using (ETracer.StartInternalTrace("consumer.delay.when-empty-queue",
+ t => t.Add("delay", newDelay)))
+ {
+ await Task.Delay(newDelay, cancellationToken);
+ }
+ return newDelay;
+ }
+
+ #endregion // DelayIfEmpty
+
+ #region GetKeysUnsafeAsync
+
+ /// <summary>
+ /// Gets the keys unsafe asynchronous.
+ /// </summary>
+ /// <param name="pattern">The pattern.</param>
+ /// <param name="cancellationToken">The cancellation token.</param>
+ /// <returns></returns>
+ public async IAsyncEnumerable<string> GetKeysUnsafeAsync(
+ string pattern,
+ [EnumeratorCancellation] CancellationToken cancellationToken = default)
+ {
+ IConnectionMultiplexer multiplexer = await _connFactory.GetAsync(cancellationToken);
+ var distict = new HashSet<string>();
+ while (!cancellationToken.IsCancellationRequested)
+ {
+ foreach (EndPoint endpoint in multiplexer.GetEndPoints())
+ {
+ IServer server = multiplexer.GetServer(endpoint);
+ // TODO: [bnaya 2020_09] check the pagination behavior
+#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
+#pragma warning disable CS8604 // Possible null reference argument.
+ await foreach (string key in server.KeysAsync(pattern: pattern))
+ {
+ if (distict.Contains(key))
+ continue;
+ distict.Add(key);
+ yield return key;
+ }
+#pragma warning restore CS8604 // Possible null reference argument.
+#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
+ }
+ }
+ }
+
+ #endregion // GetKeysUnsafeAsync
+}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs
similarity index 56%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs
index d27513f4..0b9936eb 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/DelayWhenEmptyBehavior.cs
@@ -1,13 +1,14 @@
using static System.Math;
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
+namespace EventSourcing.Backbone.Channels.RedisProvider
{
/// <summary>
/// Behavior of delay when empty
/// </summary>
public record DelayWhenEmptyBehavior
{
+ private const int MIN_DELAY_MILLI = 2;
public static readonly DelayWhenEmptyBehavior Default = new DelayWhenEmptyBehavior();
///
@@ -15,21 +16,28 @@ public record DelayWhenEmptyBehavior
///
public TimeSpan MaxDelay { get; init; } = TimeSpan.FromSeconds(5);
+ /// <summary>
+ /// The increment factor when increasing the delay (hang on empty).
+ /// The previous delay is multiplied by this factor and rounded up (Ceiling) to ensure an increment.
+ /// </summary>
+ public double DelayFactor { get; init; } = 1.2;
+
/// <summary>
/// Gets or sets the next delay.
/// </summary>
- public Func<TimeSpan, TimeSpan> CalcNextDelay { get; init; } = DefaultCalcNextDelay;
+ public Func<TimeSpan, DelayWhenEmptyBehavior, TimeSpan> CalcNextDelay { get; init; } = DefaultCalcNextDelay;
/// <summary>
/// Default calculation of next delay.
/// </summary>
/// <param name="previous">The previous delay.</param>
+ /// <param name="setting">The setting.</param>
/// <returns></returns>
- private static TimeSpan DefaultCalcNextDelay(TimeSpan previous)
+ private static TimeSpan DefaultCalcNextDelay(TimeSpan previous, DelayWhenEmptyBehavior setting)
{
var prevMilli = previous.TotalMilliseconds;
- var milli = Max(prevMilli * 2, 10);
+ var milli = Max(Math.Ceiling(prevMilli * setting.DelayFactor), MIN_DELAY_MILLI);
return TimeSpan.FromMilliseconds(milli);
}
}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs
similarity index 95%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs
index 47f987b5..b2ce638a 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/RedisConsumerChannelSetting.cs
@@ -1,7 +1,7 @@
using Polly;
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
+namespace EventSourcing.Backbone.Channels.RedisProvider
{
/// <summary>
/// Represent specific setting of the consumer channel
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs
similarity index 98%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs
index aab1487f..3daad743 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/ResiliencePolicies.cs
@@ -1,7 +1,7 @@
using Polly;
using Polly.CircuitBreaker;
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
+namespace EventSourcing.Backbone.Channels.RedisProvider
{
/// <summary>
/// Define when to claim stale (long waiting) messages from other consumers
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs
similarity index 94%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs
index ee660a75..a32babbc 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/Setting/StaleMessagesClaimingTrigger.cs
@@ -1,6 +1,6 @@
// TODO: [bnaya 2021-02] use Record
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
+namespace EventSourcing.Backbone.Channels.RedisProvider
{
/// <summary>
/// Define when to claim stale (long waiting) messages from other consumers
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs
similarity index 87%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs
index bc13d8e0..11d441a9 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/StorageStrategies/RedisHashStorageStrategy.cs
@@ -1,6 +1,6 @@
using StackExchange.Redis;
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
/// <summary>
/// Responsible to save information to REDIS hash storage.
/// </summary>
///
internal class RedisHashStorageStrategy : IConsumerStorageStrategy
{
- private readonly IEventSourceRedisConnectionFacroty _connFactory;
+ private readonly IEventSourceRedisConnectionFactory _connFactory;
/// <summary>
/// Initializes a new instance.
/// </summary>
/// <param name="connFactory">The database task.</param>
- public RedisHashStorageStrategy(IEventSourceRedisConnectionFacroty connFactory)
+ public RedisHashStorageStrategy(IEventSourceRedisConnectionFactory connFactory)
{
_connFactory = connFactory;
}
+ /// <summary>
+ /// Gets the name of the storage provider.
+ /// </summary>
+ public string Name { get; } = "Redis";
+
/// <summary>
/// Load the bucket information.
/// </summary>
@@ -41,9 +46,9 @@ async ValueTask IConsumerStorageStrategy.LoadBucketAsync(
Func<string, string> getProperty,
CancellationToken cancellation)
{
- string key = $"{meta.Key()}:{type}:{meta.MessageId}";
+ string key = $"{meta.FullUri()}:{type}:{meta.MessageId}";
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(cancellation);
IDatabaseAsync db = conn.GetDatabase();
HashEntry[] entities;
try
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/icon.png b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisConsumerProvider/icon.png differ
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/EventSourcing.Backbone.Channels.RedisProducerProvider.csproj b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/EventSourcing.Backbone.Channels.RedisProducerProvider.csproj
new file mode 100644
index 00000000..e2689aab
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/EventSourcing.Backbone.Channels.RedisProducerProvider.csproj
@@ -0,0 +1,32 @@
+
+
+
+ README.md
+
+
+
+
+ True
+ \
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerBuilder.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerBuilder.cs
new file mode 100644
index 00000000..e9046b8c
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerBuilder.cs
@@ -0,0 +1,190 @@
+using EventSourcing.Backbone.Channels.RedisProvider;
+using EventSourcing.Backbone.Private;
+
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+
+using OpenTelemetry.Trace;
+
+using Polly;
+
+using StackExchange.Redis;
+
+namespace EventSourcing.Backbone
+{
+ public static class RedisProducerBuilder
+ {
+ ///
+ /// Adds the event producer telemetry source (will result in tracing the producer).
+ ///
+ /// The builder.
+ ///
+ public static TracerProviderBuilder AddEventProducerTelemetry(this TracerProviderBuilder builder) => builder.AddSource(nameof(RedisProducerChannel));
+
+ ///
+ /// Uses REDIS producer channel.
+ ///
+ /// The resilience policy.
+ /// The configuration hook.
+ ///
+ public static IProducerStoreStrategyBuilder Create(
+ AsyncPolicy? resiliencePolicy = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(configurationHook);
+ return CreateRedisProducerBuilder(configuration, resiliencePolicy);
+ }
+
+ ///
+ /// Uses REDIS producer channel.
+ ///
+ ///
+ /// Environment key of the endpoint; when omitted, the default ('REDIS_EVENT_SOURCE_ENDPOINT') is used.
+ /// If no such environment variable exists, the value itself is treated as the actual endpoint.
+ ///
+ ///
+ /// Environment key of the password; when omitted, the default ('REDIS_EVENT_SOURCE_PASS') is used.
+ /// If no such environment variable exists, the value itself is treated as the actual password.
+ ///
+ /// The resilience policy.
+ /// The configuration hook.
+ ///
+ public static IProducerStoreStrategyBuilder Create(
+ string endpoint,
+ string? password = null,
+ AsyncPolicy? resiliencePolicy = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = RedisClientFactory.CreateConfigurationOptions(endpoint, password, configurationHook);
+ return CreateRedisProducerBuilder(configuration, resiliencePolicy);
+ }
+
+ ///
+ /// Uses REDIS producer channel.
+ ///
+ /// The credential.
+ /// The resilience policy.
+ /// The configuration hook.
+ ///
+ public static IProducerStoreStrategyBuilder CreateRedisProducerBuilder(
+ this IRedisCredentials credential,
+ AsyncPolicy? resiliencePolicy = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = credential.CreateConfigurationOptions(configurationHook);
+ return CreateRedisProducerBuilder(configuration, resiliencePolicy);
+ }
+
+ ///
+ /// Uses REDIS producer channel.
+ ///
+ /// The configuration.
+ /// The resilience policy.
+ ///
+ ///
+ public static IProducerStoreStrategyBuilder CreateRedisProducerBuilder(
+ this ConfigurationOptions configuration,
+ AsyncPolicy? resiliencePolicy = null)
+ {
+ var builder = ProducerBuilder.Empty;
+
+ return builder.UseRedisChannel(configuration, resiliencePolicy);
+ }
+
+ ///
+ /// Uses REDIS producer channel.
+ ///
+ /// The builder.
+ /// The configuration.
+ /// The resilience policy.
+ ///
+ public static IProducerStoreStrategyBuilder UseRedisChannel(
+ this IProducerBuilder builder,
+ ConfigurationOptions? configuration = null,
+ AsyncPolicy? resiliencePolicy = null)
+ {
+ var result = builder.UseChannel(LocalCreate);
+ return result;
+
+ IProducerChannelProvider LocalCreate(ILogger logger)
+ {
+ var connFactory = EventSourceRedisConnectionFactory.Create(logger, configuration);
+ var channel = new RedisProducerChannel(
+ connFactory,
+ logger ?? EventSourceFallbakLogger.Default,
+ resiliencePolicy);
+ return channel;
+ }
+ }
+
+ ///
+ /// Uses REDIS producer channel.
+ /// This overload is used by the DI
+ ///
+ /// The builder.
+ /// The redis database.
+ /// The resilience policy.
+ ///
+ ///
+ internal static IProducerStoreStrategyBuilder UseRedisChannel(
+ this IProducerBuilder builder,
+ IEventSourceRedisConnectionFactory redisConnectionFactory,
+ AsyncPolicy? resiliencePolicy = null)
+ {
+ var result = builder.UseChannel(LocalCreate);
+ return result;
+
+ IProducerChannelProvider LocalCreate(ILogger logger)
+ {
+ var channel = new RedisProducerChannel(
+ redisConnectionFactory,
+ logger ?? EventSourceFallbakLogger.Default,
+ resiliencePolicy);
+ return channel;
+ }
+ }
+
+ ///
+ /// Uses REDIS producer channel by resolving it as a dependency injection from the service-provider.
+ ///
+ /// The builder.
+ /// The service provider.
+ /// The resilience policy.
+ ///
+ public static IProducerIocStoreStrategyBuilder ResolveRedisProducerChannel(
+ this IProducerBuilder builder,
+ IServiceProvider serviceProvider,
+ AsyncPolicy? resiliencePolicy = null)
+ {
+ var result = builder.UseChannel(serviceProvider, LocalCreate);
+ return result;
+
+ IProducerChannelProvider LocalCreate(ILogger logger)
+ {
+ var connFactory = serviceProvider.GetService<IEventSourceRedisConnectionFactory>();
+ if (connFactory == null)
+ throw new RedisConnectionException(ConnectionFailureType.None, $"{nameof(IEventSourceRedisConnectionFactory)} is not registered, use services.{nameof(RedisDiExtensions.AddEventSourceRedisConnection)} in order to register it at Setup stage.");
+ var channel = new RedisProducerChannel(
+ connFactory,
+ logger ?? EventSourceFallbakLogger.Default,
+ resiliencePolicy);
+ return channel;
+ }
+ }
+
+ ///
+ /// Uses REDIS producer channel by resolving it as a dependency injection from the service-provider.
+ ///
+ /// The service provider.
+ /// The resilience policy.
+ ///
+ public static IProducerIocStoreStrategyBuilder ResolveRedisProducerChannel(
+ this IServiceProvider serviceProvider,
+ AsyncPolicy? resiliencePolicy = null)
+ {
+ var result = ProducerBuilder.Empty.ResolveRedisProducerChannel(serviceProvider, resiliencePolicy);
+ return result;
+ }
+ }
+}
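For orientation, here is a minimal usage sketch of the builder API added above. The endpoint value and configuration-hook body are placeholders, and the fluent calls that would follow the returned `IProducerStoreStrategyBuilder` (environment, stream URI, build) belong to the core builder API rather than to this file.

```csharp
using EventSourcing.Backbone;

// Sketch only: "localhost:6379" is a placeholder endpoint (or an environment-variable
// name, per the Create overload documented above).
IProducerStoreStrategyBuilder producerBuilder = RedisProducerBuilder.Create(
    endpoint: "localhost:6379",
    password: null,
    resiliencePolicy: null,                          // null => the channel's default retry policy
    configurationHook: cfg => cfg.ConnectRetry = 5);

// Alternatively, start from an existing IProducerBuilder:
IProducerStoreStrategyBuilder viaChannel = ProducerBuilder.Empty.UseRedisChannel();
```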
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs
new file mode 100644
index 00000000..2f417c41
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs
@@ -0,0 +1,193 @@
+using System;
+using System.Collections.Immutable;
+using System.Diagnostics;
+using System.Diagnostics.Metrics;
+using System.Text.Json;
+
+using EventSourcing.Backbone.Producers;
+
+using Microsoft.Extensions.Logging;
+
+using OpenTelemetry;
+
+using Polly;
+
+using StackExchange.Redis;
+
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+
+using static EventSourcing.Backbone.Private.EventSourceTelemetry;
+
+namespace EventSourcing.Backbone.Channels.RedisProvider;
+
+internal class RedisProducerChannel : IProducerChannelProvider
+{
+ private readonly ILogger _logger;
+ private readonly AsyncPolicy _resiliencePolicy;
+ private readonly IEventSourceRedisConnectionFactory _connFactory;
+ private readonly IProducerStorageStrategy _defaultStorageStrategy;
+ private const string META_SLOT = "____";
+
+ private static readonly Counter ProduceEventsCounter = EMeter.CreateCounter("evt-src.sys.produce.events", "count",
+ "Sum of total produced events (messages)");
+
+ #region Ctor
+
+ ///
+ /// Initializes a new instance.
+ ///
+ /// The redis connection factory.
+ /// The logger.
+ /// The resilience policy for retry.
+ public RedisProducerChannel(
+ IEventSourceRedisConnectionFactory redisFactory,
+ ILogger logger,
+ AsyncPolicy? resiliencePolicy)
+ {
+ _connFactory = redisFactory;
+ _logger = logger;
+ _resiliencePolicy = resiliencePolicy ??
+ Policy.Handle<Exception>()
+ .RetryAsync(3);
+ _defaultStorageStrategy = new RedisHashStorageStrategy(_connFactory, logger);
+ }
+
+
+ #endregion // Ctor
+
+ #region SendAsync
+
+ ///
+ /// Sends raw announcement.
+ ///
+ /// The raw announcement data.
+ /// The storage strategy.
+ ///
+ /// Return the message id
+ ///
+ public async ValueTask<string> SendAsync(
+ Announcement payload,
+ ImmutableArray<IProducerStorageStrategy> storageStrategy)
+ {
+ Metadata meta = payload.Metadata;
+ string id = meta.MessageId;
+ string env = meta.Environment.ToDash();
+ string uri = meta.UriDash;
+ using var activity = ETracer.StartInternalTrace($"producer.{meta.Operation}.process",
+ t => t.Add("env", env)
+ .Add("uri", uri)
+ .Add("message-id", id));
+
+ #region var entries = new NameValueEntry[]{...}
+
+ string metaJson = JsonSerializer.Serialize(meta, EventSourceOptions.FullSerializerOptions);
+
+ // local method
+ NameValueEntry KV(RedisValue key, RedisValue value) => new NameValueEntry(key, value);
+ ImmutableArray<NameValueEntry> commonEntries = ImmutableArray.Create(
+ KV(nameof(meta.MessageId), id),
+ KV(nameof(meta.Operation), meta.Operation),
+ KV(nameof(meta.ProducedAt), meta.ProducedAt.ToUnixTimeSeconds()),
+ KV(nameof(meta.ChannelType), CHANNEL_TYPE),
+ KV(nameof(meta.Origin), meta.Origin.ToString()),
+ KV(META_SLOT, metaJson)
+ );
+
+ #endregion // var entries = new NameValueEntry[]{...}
+
+ RedisValue messageId = await _resiliencePolicy.ExecuteAsync(LocalStreamAddAsync);
+
+ return (string?)messageId ?? "0000000000000-0";
+
+ #region LocalStreamAddAsync
+
+ async Task<RedisValue> LocalStreamAddAsync()
+ {
+ await LocalStoreBucketAsync(EventBucketCategories.Segments);
+ await LocalStoreBucketAsync(EventBucketCategories.Interceptions);
+
+ var telemetryBuilder = commonEntries.ToBuilder();
+ using Activity? activity = ETracer.StartProducerTrace(meta);
+ activity.InjectSpan(telemetryBuilder, LocalInjectTelemetry);
+ var entries = telemetryBuilder.ToArray();
+
+ try
+ {
+ IConnectionMultiplexer conn = await _connFactory.GetAsync(CancellationToken.None);
+ IDatabaseAsync db = conn.GetDatabase();
+ // using var scope = SuppressInstrumentationScope.Begin();
+ var k = meta.FullUri();
+ ProduceEventsCounter.WithTag("uri", uri).WithTag("env", env).Add(1);
+ var result = await db.StreamAddAsync(k, entries,
+ flags: CommandFlags.DemandMaster);
+ return result;
+ }
+ #region Exception Handling
+
+ catch (RedisConnectionException ex)
+ {
+ _logger.LogError(ex, "REDIS Connection Failure: push event [{id}] into the [{env}:{URI}] stream: {operation}",
+ meta.MessageId, env, uri, meta.Operation);
+ throw;
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError(ex, "Fail to push event [{id}] into the [{env}:{URI}] stream: {operation}",
+ meta.MessageId, env, uri, meta.Operation);
+ throw;
+ }
+
+ #endregion // Exception Handling
+
+ #region ValueTask StoreBucketAsync(StorageType storageType) // local function
+
+ async ValueTask LocalStoreBucketAsync(EventBucketCategories storageType)
+ {
+ var strategies = storageStrategy.Where(m => m.IsOfTargetType(storageType));
+ Bucket bucket = storageType == EventBucketCategories.Segments ? payload.Segments : payload.InterceptorsData;
+ if (strategies.Any())
+ {
+ foreach (var strategy in strategies)
+ {
+ await SaveBucketAsync(strategy);
+ }
+ }
+ else
+ {
+ await SaveBucketAsync(_defaultStorageStrategy);
+ }
+
+ async ValueTask SaveBucketAsync(IProducerStorageStrategy strategy)
+ {
+ using (ETracer.StartInternalTrace($"evt-src.producer.{strategy.Name}-storage.{storageType}.set"))
+ {
+ IImmutableDictionary<string, string> metaItems =
+ await strategy.SaveBucketAsync(id, bucket, storageType, meta);
+ foreach (var item in metaItems)
+ {
+ commonEntries = commonEntries.Add(KV(item.Key, item.Value));
+ }
+ }
+ }
+ }
+
+ #endregion // ValueTask StoreBucketAsync(StorageType storageType) // local function
+
+ #region LocalInjectTelemetry
+
+ void LocalInjectTelemetry(
+ ImmutableArray<NameValueEntry>.Builder builder,
+ string key,
+ string value)
+ {
+ builder.Add(KV(key, value));
+ }
+
+ #endregion // LocalInjectTelemetry
+ }
+
+ #endregion // LocalStreamAddAsync
+ }
+
+ #endregion // SendAsync
+}
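When no resilience policy is supplied, the channel above falls back to a plain three-retry policy. A custom Polly policy can be passed through the builder instead; the following sketch (retry counts and delays are arbitrary examples) shows one way to wire it in.

```csharp
using EventSourcing.Backbone;
using Polly;
using StackExchange.Redis;

// Sketch: retry transient Redis failures with a short, growing back-off.
AsyncPolicy resilience = Policy
    .Handle<RedisConnectionException>()
    .Or<RedisTimeoutException>()
    .WaitAndRetryAsync(3, attempt => TimeSpan.FromMilliseconds(200 * attempt));

var builder = ProducerBuilder.Empty.UseRedisChannel(resiliencePolicy: resilience);
```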
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs
similarity index 82%
rename from Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs
rename to Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs
index 00235a55..82518f7f 100644
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/StorageStrategies/RedisHashStorageStrategy.cs
@@ -4,7 +4,7 @@
using StackExchange.Redis;
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
///
/// Responsible to save information to REDIS hash storage.
@@ -15,7 +15,7 @@ namespace Weknow.EventSource.Backbone.Channels
///
internal class RedisHashStorageStrategy : IProducerStorageStrategy
{
- private readonly IEventSourceRedisConnectionFacroty _connFactory;
+ private readonly IEventSourceRedisConnectionFactory _connFactory;
private readonly ILogger _logger;
#region Ctor
@@ -26,7 +26,7 @@ internal class RedisHashStorageStrategy : IProducerStorageStrategy
/// The connection factory.
/// The logger.
public RedisHashStorageStrategy(
- IEventSourceRedisConnectionFacroty connFactory,
+ IEventSourceRedisConnectionFactory connFactory,
ILogger logger)
{
_connFactory = connFactory;
@@ -35,6 +35,11 @@ public RedisHashStorageStrategy(
#endregion // Ctor
+ ///
+ /// Gets the name of the storage provider.
+ ///
+ public string Name { get; } = "Redis";
+
///
/// Saves the bucket information.
///
@@ -53,7 +58,7 @@ async ValueTask> IProducerStorageStrategy.S
Metadata meta,
CancellationToken cancellation)
{
- var conn = await _connFactory.GetAsync();
+ var conn = await _connFactory.GetAsync(cancellation);
try
{
IDatabaseAsync db = conn.GetDatabase();
@@ -62,15 +67,15 @@ async ValueTask> IProducerStorageStrategy.S
.Select(sgm =>
new HashEntry(sgm.Key, sgm.Value))
.ToArray();
- var key = $"{meta.Key()}:{type}:{id}";
+ var key = $"{meta.FullUri()}:{type}:{id}";
await db.HashSetAsync(key, segmentsEntities);
- return ImmutableDictionary.Empty; // .Add($"redis:{type}:key", key);
+ return ImmutableDictionary.Empty;
}
catch (Exception ex)
{
- _logger.LogError(ex, "Fail to Save event's [{id}] buckets [{type}], into the [{partition}->{shard}] stream: {operation}, IsConnecting: {connecting}",
- id, type, meta.Partition, meta.Shard, meta.Operation, conn.IsConnecting);
+ _logger.LogError(ex, "Fail to Save event's [{id}] buckets [{type}], into the [{URI}] stream: {operation}, IsConnecting: {connecting}",
+ id, type, meta.Uri, meta.Operation, conn.IsConnecting);
throw;
}
}
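With the key change above, both the producer and consumer hash strategies now address buckets by the stream's full URI instead of the old partition/shard key. A rough illustration of the resulting Redis hash key (the URI and message id below are made up):

```csharp
// Hypothetical values for illustration only.
string fullUri = "dev:orders";              // meta.FullUri()
string messageId = "1686210000000-0";       // meta.MessageId
string type = "Segments";                   // EventBucketCategories.Segments

string hashKey = $"{fullUri}:{type}:{messageId}";   // => "dev:orders:Segments:1686210000000-0"
```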
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/icon.png b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProducerProvider/icon.png differ
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/EventSourcing.Backbone.Channels.RedisProvider.Common.csproj b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/EventSourcing.Backbone.Channels.RedisProvider.Common.csproj
new file mode 100644
index 00000000..c4880379
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/EventSourcing.Backbone.Channels.RedisProvider.Common.csproj
@@ -0,0 +1,26 @@
+<!-- The XML body of this project file was lost in extraction; the surviving fragments
+     show it packs README.md into the NuGet package (Pack=True, PackagePath=\). -->
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFactory.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFactory.cs
new file mode 100644
index 00000000..c7b2df9b
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFactory.cs
@@ -0,0 +1,289 @@
+using System.Diagnostics;
+using System.Diagnostics.Metrics;
+
+using Microsoft.Extensions.Logging;
+
+using StackExchange.Redis;
+
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+using static EventSourcing.Backbone.Private.EventSourceTelemetry;
+
+#pragma warning disable S3881 // "IDisposable" should be implemented correctly
+#pragma warning disable S2953 // Methods named "Dispose" should implement "IDisposable.Dispose"
+
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Event Source connection (for IoC).
+ /// Because IConnectionMultiplexer may be used by other components,
+ /// it is clearer to wrap it so it can be resolved easily via IoC.
+ /// This factory is also responsible for the connection's health:
+ /// it returns the same connection as long as that connection stays healthy.
+ ///
+ public class EventSourceRedisConnectionFactory : IEventSourceRedisConnectionFactory, IDisposable, IAsyncDisposable
+ {
+ private const int CLOSE_DELEY_MILLISECONDS = 5000;
+ private Task<IConnectionMultiplexer> _redisTask;
+ private readonly ILogger _logger;
+ private readonly ConfigurationOptions _configuration;
+ private readonly AsyncLock _lock = new AsyncLock(TimeSpan.FromSeconds(CLOSE_DELEY_MILLISECONDS));
+ private DateTime _lastResetConnection = DateTime.Now;
+ private int _reconnectTry = 0;
+ private const string CHANGE_CONN = "redis-change-connection";
+ private static readonly Counter ReConnectCounter = EMeter.CreateCounter(CHANGE_CONN, "count",
+ "count how many time the connection was re-create");
+
+ #region Ctor
+
+ ///
+ /// Constructor
+ ///
+ /// The logger.
+ /// The configuration.
+ public EventSourceRedisConnectionFactory(
+ ILogger<EventSourceRedisConnectionFactory> logger,
+ ConfigurationOptions? configuration = null)
+ : this((ILogger)logger, configuration)
+ {
+ }
+
+ ///
+ /// Constructor
+ ///
+ /// The logger.
+ /// The configuration.
+ private EventSourceRedisConnectionFactory(
+ ILogger logger,
+ ConfigurationOptions? configuration = null)
+ {
+ _logger = logger;
+ if (configuration == null)
+ {
+ var cred = new RedisCredentialsEnvKeys();
+ _configuration = cred.CreateConfigurationOptions();
+ }
+ else
+ {
+ _configuration = configuration;
+ }
+ _redisTask = RedisClientFactory.CreateProviderAsync(_configuration, logger);
+ }
+
+ #endregion // Ctor
+
+ #region Create
+
+ ///
+ /// Create instance
+ ///
+ /// The configuration.
+ /// The logger.
+ ///
+ public static IEventSourceRedisConnectionFactory Create(
+ ILogger logger,
+ ConfigurationOptions? configuration = null)
+ {
+ return new EventSourceRedisConnectionFactory(logger, configuration);
+ }
+
+ ///
+ /// Create instance
+ ///
+ /// The credential.
+ /// The logger.
+ /// The configuration hook.
+ ///
+ public static IEventSourceRedisConnectionFactory Create(
+ IRedisCredentials credential,
+ ILogger logger,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var configuration = credential.CreateConfigurationOptions();
+ return new EventSourceRedisConnectionFactory(logger, configuration);
+ }
+
+ ///
+ /// Create instance
+ ///
+ /// The logger.
+ /// The raw endpoint (not an environment variable).
+ /// The password (not an environment variable).
+ /// The configuration hook.
+ ///
+ public static IEventSourceRedisConnectionFactory Create(
+ ILogger logger,
+ string endpoint,
+ string? password = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var credential = new RedisCredentialsRaw(endpoint, password);
+ return Create(credential, logger, configurationHook);
+ }
+
+ ///
+ /// Create instance from environment variable
+ ///
+ /// The logger.
+ /// The endpoint.
+ /// The password.
+ /// The configuration hook.
+ ///
+ public static IEventSourceRedisConnectionFactory CreateFromEnv(
+ ILogger logger,
+ string endpointEnvKey,
+ string passwordEnvKey = PASSWORD_KEY,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var credential = new RedisCredentialsEnvKeys(endpointEnvKey, passwordEnvKey);
+ return Create(credential, logger, configurationHook);
+ }
+
+ #endregion // Create
+
+ #region Kind
+
+ ///
+ /// Gets the kind.
+ ///
+ protected virtual string Kind { get; } = "Event-Sourcing";
+
+ #endregion // Kind
+
+ #region GetAsync
+
+ ///
+ /// Get a valid connection
+ ///
+ /// The cancellation token.
+ ///
+ async Task<IConnectionMultiplexer> IEventSourceRedisConnectionFactory.GetAsync(CancellationToken cancellationToken)
+ {
+ var conn = await _redisTask;
+ if (conn.IsConnected)
+ return conn;
+ string status = conn.GetStatus();
+ _logger.LogWarning("REDIS Connection [{kind}] [{ClientName}]: status = [{status}]",
+ Kind,
+ conn.ClientName, status);
+ var disp = await _lock.AcquireAsync(cancellationToken);
+ using (disp)
+ {
+ conn = await _redisTask;
+ if (conn.IsConnected)
+ return conn;
+ int tryNumber = Interlocked.Increment(ref _reconnectTry);
+ _logger.LogWarning("[{kind}] Reconnecting to REDIS: try=[{tryNumber}], client name=[{clientName}]",
+ Kind, tryNumber, conn.ClientName);
+ var duration = DateTime.Now - _lastResetConnection;
+ if (duration > TimeSpan.FromSeconds(5))
+ {
+ _lastResetConnection = DateTime.Now;
+ var cn = conn;
+ Activity.Current?.AddEvent(CHANGE_CONN, t => t.Add("redis.operation-kind", Kind));
+ ReConnectCounter.WithTag("redis.operation-kind", Kind).Add(1);
+ Task _ = Task.Delay(CLOSE_DELEY_MILLISECONDS).ContinueWith(_ => cn.CloseAsync());
+ _redisTask = _configuration.CreateProviderAsync(_logger);
+ var newConn = await _redisTask;
+ return newConn;
+ }
+ return conn;
+ }
+ }
+
+ #endregion // GetAsync
+
+ #region GetDatabaseAsync
+
+ ///
+ /// Get database
+ ///
+ /// The cancellation token.
+ ///
+ async Task<IDatabaseAsync> IEventSourceRedisConnectionFactory.GetDatabaseAsync(CancellationToken cancellationToken)
+ {
+ IEventSourceRedisConnectionFactory self = this;
+ IConnectionMultiplexer conn = await self.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ return db;
+ }
+
+ #endregion // GetDatabaseAsync
+
+ #region Dispose (pattern)
+
+ ///
+ /// Disposed indication
+ ///
+ public bool Disposed { get; private set; }
+
+ ///
+ /// Dispose
+ ///
+ ///
+ private void Dispose(bool disposing)
+ {
+ try
+ {
+ _logger.LogWarning("REDIS [{kind}]: Disposing connection", Kind);
+ }
+ catch { }
+ try
+ {
+ if (!Disposed)
+ {
+ var conn = _redisTask.Result;
+ conn.Dispose();
+ Disposed = true;
+ OnDispose(disposing);
+ }
+ }
+ catch { }
+ }
+
+ ///
+ /// Dispose
+ ///
+ void IDisposable.Dispose()
+ {
+ GC.SuppressFinalize(this);
+ // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
+ Dispose(disposing: true);
+ }
+
+ ///
+ /// Called when [dispose].
+ ///
+ /// if set to true [disposing].
+ ///
+ protected virtual void OnDispose(bool disposing) { }
+
+ ///
+ /// Dispose
+ ///
+ ///
+ public async ValueTask DisposeAsync()
+ {
+ GC.SuppressFinalize(this);
+ try
+ {
+ _logger.LogWarning("REDIS [{kind}]: Disposing connection (async)", Kind);
+ }
+ catch { }
+ var redis = await _redisTask;
+ redis.Dispose();
+ OnDispose(true);
+ }
+
+ ///
+ /// Finalizer
+ ///
+ ~EventSourceRedisConnectionFactory()
+ {
+ // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
+ Dispose(disposing: false);
+ }
+
+ #endregion // Dispose (pattern)
+ }
+}
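Outside of DI, the factory can also be constructed directly. A minimal sketch of obtaining a database handle (the endpoint is a placeholder and NullLogger stands in for a real logger):

```csharp
using System.Threading;

using EventSourcing.Backbone;
using Microsoft.Extensions.Logging.Abstractions;
using StackExchange.Redis;

IEventSourceRedisConnectionFactory factory =
    EventSourceRedisConnectionFactory.Create(NullLogger.Instance, "localhost:6379");

// GetAsync/GetDatabaseAsync are explicit interface members, hence the interface-typed variable.
IDatabaseAsync db = await factory.GetDatabaseAsync(CancellationToken.None);
```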
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFactory.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFactory.cs
new file mode 100644
index 00000000..6d54f837
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFactory.cs
@@ -0,0 +1,23 @@
+using StackExchange.Redis;
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Connection factory
+ ///
+ public interface IEventSourceRedisConnectionFactory
+ {
+ ///
+ /// Get a valid connection
+ ///
+ /// The cancellation token.
+ ///
+ Task<IConnectionMultiplexer> GetAsync(CancellationToken cancellationToken);
+ ///
+ /// Get database
+ ///
+ /// The cancellation token.
+ ///
+ Task<IDatabaseAsync> GetDatabaseAsync(CancellationToken cancellationToken);
+ }
+}
\ No newline at end of file
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IRedisCredentials.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IRedisCredentials.cs
new file mode 100644
index 00000000..87728c96
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/IRedisCredentials.cs
@@ -0,0 +1,11 @@
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Redis credentials abstraction
+ ///
+ public interface IRedisCredentials
+ {
+ string? Endpoint { get; }
+ string? Password { get; }
+ }
+}
\ No newline at end of file
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs
new file mode 100644
index 00000000..3937b0aa
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs
@@ -0,0 +1,263 @@
+using System.Reflection;
+using System.Text;
+
+using Microsoft.Extensions.Logging;
+
+using StackExchange.Redis;
+
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// REDIS client factory
+ ///
+ public static class RedisClientFactory
+ {
+ private static int _index = 0;
+ private const string CONNECTION_NAME_PATTERN = "ev-src:{0}:{1}:{2}";
+ private static readonly string? ASSEMBLY_NAME = Assembly.GetEntryAssembly()?.GetName()?.Name?.ToDash();
+ private static readonly Version? ASSEMBLY_VERSION = Assembly.GetEntryAssembly()?.GetName()?.Version;
+
+ #region CreateConfigurationOptions
+
+ ///
+ /// Create REDIS configuration options.
+ ///
+ /// A configuration hook.
+ ///
+ public static ConfigurationOptions CreateConfigurationOptions(
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ IRedisCredentials credential = new RedisCredentialsEnvKeys();
+ var redis = credential.CreateConfigurationOptions(configurationHook);
+ return redis;
+ }
+
+ ///
+ /// Create REDIS configuration options.
+ ///
+ /// The raw endpoint (not an environment variable).
+ /// The password (not an environment variable).
+ /// A configuration hook.
+ ///
+ public static ConfigurationOptions CreateConfigurationOptions(
+ string endpoint,
+ string? password = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ IRedisCredentials credential = new RedisCredentialsRaw(endpoint, password);
+ var redis = credential.CreateConfigurationOptions(configurationHook);
+ return redis;
+ }
+
+ ///
+ /// Create REDIS configuration options.
+ ///
+ /// The credential's environment keys.
+ /// A configuration hook.
+ ///
+ public static ConfigurationOptions CreateConfigurationOptions(
+ this IRedisCredentials credential,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ var (endpoint, password) = credential switch
+ {
+ RedisCredentialsRaw raw => (raw.Endpoint, raw.Password),
+ RedisCredentialsEnvKeys env => (
+ Environment.GetEnvironmentVariable(env.Endpoint ?? END_POINT_KEY),
+ Environment.GetEnvironmentVariable(env.Password ?? PASSWORD_KEY)
+ ),
+ _ => throw new InvalidOperationException(credential?.GetType()?.Name)
+ };
+
+ #region Validation
+
+ if (string.IsNullOrEmpty(endpoint))
+ throw new InvalidOperationException($"{nameof(endpoint)} is null");
+
+ #endregion // Validation
+
+ // https://stackexchange.github.io/StackExchange.Redis/Configuration.html
+ var configuration = ConfigurationOptions.Parse(endpoint);
+ configuration.Password = password;
+ if (configurationHook != null)
+ configuration.Apply(configurationHook);
+
+ return configuration;
+ }
+
+ #endregion // CreateConfigurationOptions
+
+ #region CreateProviderAsync
+
+ ///
+ /// Create REDIS client.
+ ///
+ /// The logger.
+ /// A configuration hook.
+ ///
+ public static async Task<IConnectionMultiplexer> CreateProviderAsync(
+ ILogger? logger = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ IRedisCredentials credential = new RedisCredentialsEnvKeys();
+ var redis = await credential.CreateProviderAsync(logger, configurationHook);
+ return redis;
+ }
+
+ ///
+ /// Create REDIS client.
+ ///
+ /// The raw endpoint (not an environment variable).
+ /// The password (not an environment variable).
+ /// The logger.
+ /// A configuration hook.
+ ///
+ public static async Task<IConnectionMultiplexer> CreateProviderAsync(
+ string endpoint,
+ string? password = null,
+ ILogger? logger = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ IRedisCredentials credential = new RedisCredentialsRaw(endpoint, password);
+ var redis = await credential.CreateProviderAsync(logger, configurationHook);
+ return redis;
+ }
+
+ ///
+ /// Create REDIS client.
+ ///
+ /// The credential's environment keys.
+ /// The logger.
+ /// A configuration hook.
+ ///
+ public static async Task<IConnectionMultiplexer> CreateProviderAsync(
+ this IRedisCredentials credential,
+ ILogger? logger = null,
+ Action<ConfigurationOptions>? configurationHook = null)
+ {
+ try
+ {
+ // https://stackexchange.github.io/StackExchange.Redis/Configuration.html
+ var configuration = credential.CreateConfigurationOptions(configurationHook);
+ var redis = await configuration.CreateProviderAsync(logger);
+ return redis;
+ }
+ catch (Exception ex)
+ {
+ if (logger != null)
+ logger.LogError(ex.FormatLazy(), "REDIS CONNECTION Setting ERROR");
+ else
+ Console.WriteLine($"REDIS CONNECTION Setting ERROR: {ex.FormatLazy()}");
+ throw;
+ }
+ }
+
+ ///
+ /// Create REDIS client.
+ ///
+ /// The logger.
+ /// The configuration.
+ ///
+ ///
+ ///
+ /// Fail to establish REDIS connection
+ /// Fail to establish REDIS connection
+ public static async Task<IConnectionMultiplexer> CreateProviderAsync(
+ this ConfigurationOptions configuration,
+ ILogger? logger = null)
+ {
+ try
+ {
+ var sb = new StringBuilder();
+ var writer = new StringWriter(sb);
+
+ // https://stackexchange.github.io/StackExchange.Redis/Configuration.html
+ configuration.ClientName = string.Format(
+ CONNECTION_NAME_PATTERN,
+ ASSEMBLY_NAME,
+ ASSEMBLY_VERSION,
+ Interlocked.Increment(ref _index));
+
+ configuration = configuration.Apply(cfg =>
+ {
+ // keep retry to get connection on failure
+ cfg.AbortOnConnectFail = false;
+#pragma warning disable S125
+ /*
+ cfg.ConnectTimeout = 15;
+ cfg.SyncTimeout = 10;
+ cfg.AsyncTimeout = 10;
+ cfg.DefaultDatabase = Debugger.IsAttached ? 1 : null;
+ */
+#pragma warning restore S125
+ }
+);
+
+
+ IConnectionMultiplexer redis = await ConnectionMultiplexer.ConnectAsync(configuration, writer);
+ string endpoints = string.Join(";", configuration.EndPoints);
+ if (logger != null)
+ logger.LogInformation("REDIS Connection [{envKey}]: {info} succeed",
+ endpoints,
+ sb);
+ else
+ Console.WriteLine($"REDIS Connection [{endpoints}] succeed: {sb}");
+ redis.ConnectionFailed += OnConnectionFailed;
+ redis.ErrorMessage += OnConnErrorMessage;
+ redis.InternalError += OnInternalConnError;
+
+ return redis;
+ }
+ catch (Exception ex)
+ {
+ if (logger != null)
+ logger.LogError(ex.FormatLazy(), "REDIS CONNECTION ERROR");
+ else
+ Console.WriteLine($"REDIS CONNECTION ERROR: {ex.FormatLazy()}");
+ throw;
+ }
+
+ #region Event Handlers
+
+ void OnInternalConnError(object? sender, InternalErrorEventArgs e)
+ {
+ if (logger != null)
+ {
+ logger.LogError(e.Exception, "REDIS Connection internal failure: Failure type = {typeOfConnection}, Origin = {typeOfFailure}",
+ e.ConnectionType, e.Origin);
+ }
+ else
+ Console.WriteLine($"REDIS Connection internal failure: Failure type = {e.ConnectionType}, Origin = {e.Origin}");
+ }
+
+ void OnConnErrorMessage(object? sender, RedisErrorEventArgs e)
+ {
+ if (logger != null)
+ {
+ logger.LogWarning("REDIS Connection error: {message}",
+ e.Message);
+ }
+ else
+ Console.WriteLine($"REDIS Connection error: {e.Message}");
+ }
+
+
+ void OnConnectionFailed(object? sender, ConnectionFailedEventArgs e)
+ {
+ if (logger != null)
+ {
+ logger.LogError(e.Exception, "REDIS Connection failure: Failure type = {typeOfConnection}, Failure type = {typeOfFailure}", e.ConnectionType, e.FailureType);
+ }
+ else
+ Console.WriteLine($"REDIS Connection failure: Failure type = {e.ConnectionType}, Failure type = {e.FailureType}");
+ }
+
+ #endregion // Event Handlers
+
+ }
+
+ #endregion // CreateProviderAsync
+ }
+}
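A sketch of using this factory directly, assuming a reachable Redis at a placeholder endpoint; the hook body shows arbitrary examples of ConfigurationOptions settings, not required values:

```csharp
using EventSourcing.Backbone;
using StackExchange.Redis;

ConfigurationOptions options = RedisClientFactory.CreateConfigurationOptions(
    "localhost:6379",
    password: null,
    configurationHook: cfg =>
    {
        cfg.ConnectRetry = 5;          // keep retrying while the server warms up
        cfg.DefaultDatabase = 0;
    });

IConnectionMultiplexer mux = await options.CreateProviderAsync();
```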
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsEnvKeys.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsEnvKeys.cs
new file mode 100644
index 00000000..41c898b3
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsEnvKeys.cs
@@ -0,0 +1,10 @@
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Environment keys for REDIS's credentials
+ ///
+ public record RedisCredentialsEnvKeys(string Endpoint = END_POINT_KEY, string Password = PASSWORD_KEY) : IRedisCredentials;
+}
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsRaw.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsRaw.cs
new file mode 100644
index 00000000..85b1db87
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsRaw.cs
@@ -0,0 +1,33 @@
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Raw keys for REDIS's credentials
+ ///
+ public record RedisCredentialsRaw : IRedisCredentials
+ {
+ #region Ctor
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ /// The raw endpoint (not an environment variable).
+ /// The password (not an environment variable).
+ /// endpoint
+ public RedisCredentialsRaw(string endpoint, string? password = null)
+ {
+ Endpoint = endpoint ?? throw new ArgumentNullException(nameof(endpoint));
+ Password = password;
+ }
+
+ #endregion // Ctor
+
+ ///
+ /// The raw endpoint (not an environment variable)
+ ///
+ public string? Endpoint { get; }
+ ///
+ /// The password (not an environment variable).
+ ///
+ public string? Password { get; }
+ }
+}
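The two records above cover the common cases: literal values versus environment-variable keys. A brief sketch (values are placeholders):

```csharp
using EventSourcing.Backbone;

// Literal endpoint/password:
IRedisCredentials raw = new RedisCredentialsRaw("localhost:6379");

// Names of environment variables to read (defaults come from RedisChannelConstants):
IRedisCredentials fromEnv = new RedisCredentialsEnvKeys();

// Either flavor feeds the same extension:
var options = raw.CreateConfigurationOptions();
```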
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs
new file mode 100644
index 00000000..0ba27877
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs
@@ -0,0 +1,26 @@
+namespace EventSourcing.Backbone.Channels.RedisProvider.Common;
+
+public static class RedisChannelConstants
+{
+ public const string CHANNEL_TYPE = "REDIS Channel V1";
+ public const string META_ARRAY_SEPARATOR = "~|~";
+
+ public const string END_POINT_KEY = "REDIS_EVENT_SOURCE_ENDPOINT";
+ public const string PASSWORD_KEY = "REDIS_EVENT_SOURCE_PASS";
+
+ ///
+ /// A workaround used to release messages back to the stream (consumer side).
+ ///
+ public const string NONE_CONSUMER = "__NONE_CUNSUMER__";
+
+ public static class MetaKeys
+ {
+ public const string SegmentsKeys = "segments-keys";
+ public const string InterceptorsKeys = "interceptors-keys";
+ public const string TelemetryBaggage = "telemetry-baggage";
+ public const string TelemetrySpanId = "telemetry-span-id";
+ public const string TelemetryTraceId = "telemetry-trace-id";
+ }
+
+}
+
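For local runs or tests, the default environment keys above can be pointed at a disposable Redis instance; a sketch (values are placeholders):

```csharp
using System;

using EventSourcing.Backbone.Channels.RedisProvider.Common;

Environment.SetEnvironmentVariable(RedisChannelConstants.END_POINT_KEY, "localhost:6379");
Environment.SetEnvironmentVariable(RedisChannelConstants.PASSWORD_KEY, string.Empty);
```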
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs
new file mode 100644
index 00000000..ff54c3e7
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs
@@ -0,0 +1,223 @@
+using System.Diagnostics;
+using System.Diagnostics.Metrics;
+
+using Microsoft.Extensions.Logging;
+
+using StackExchange.Redis;
+
+using static EventSourcing.Backbone.Private.EventSourceTelemetry;
+
+namespace EventSourcing.Backbone.Private
+{
+ ///
+ /// Redis common provider extensions
+ ///
+ public static class RedisCommonProviderExtensions
+ {
+ private const int DELAY_ON_MISSING_KEY = 5;
+ private const int MIN_DELAY = 2;
+ private const int SPIN_LIMIT = 30;
+ private const int MAX_DELAY = 3_000;
+ private static Counter KeyMissingCounter = EMeter.CreateCounter("evt-src.sys.key-missing", "count", "count missing key events");
+ private static Counter CreateConsumerGroupCounter = EMeter.CreateCounter("evt-src.sys.create-consumer-group", "count", "creating a consumer group");
+ private static Counter CreateConsumerGroupRetryCounter = EMeter.CreateCounter("evt-src.sys.create-consumer-group-retry", "count", "retries of creating a consumer group");
+
+ private static readonly AsyncLock _lock = new AsyncLock(TimeSpan.FromSeconds(20));
+
+ #region CreateConsumerGroupIfNotExistsAsync
+
+ ///
+ /// Creates the consumer group if not exists asynchronous.
+ ///
+ /// The connection factory.
+ /// The plan.
+ /// The consumer group.
+ /// The logger.
+ /// The cancellation token.
+ ///
+ public static async Task CreateConsumerGroupIfNotExistsAsync(
+ this IEventSourceRedisConnectionFactory connFactory,
+ IConsumerPlan plan,
+ string consumerGroup,
+ ILogger logger,
+ CancellationToken cancellationToken)
+ {
+ Env env = plan.Environment;
+ string uri = plan.UriDash;
+ string fullUri = plan.FullUri();
+
+ StreamGroupInfo[] groupsInfo = Array.Empty<StreamGroupInfo>();
+ using var track = ETracer.StartInternalTrace("consumer.create-consumer-group",
+ t => PrepareTrace(t));
+
+ PrepareMeter(CreateConsumerGroupCounter).Add(1);
+ int delay = MIN_DELAY;
+ bool exists = false;
+ int tryNumber = 0;
+ var retryCounter = PrepareMeter(CreateConsumerGroupRetryCounter);
+ var missingCounter = PrepareMeter(KeyMissingCounter);
+ while (groupsInfo.Length == 0)
+ {
+ if (tryNumber != 0)
+ retryCounter.Add(1);
+ tryNumber++;
+
+ IConnectionMultiplexer conn = await connFactory.GetAsync(cancellationToken);
+ IDatabaseAsync db = conn.GetDatabase();
+ try
+ {
+ #region delay on retry
+
+ if (tryNumber > SPIN_LIMIT)
+ {
+ delay = Math.Min(delay * 2, MAX_DELAY);
+ using (ETracer.StartInternalTrace("consumer.delay.key-not-exists",
+ t => PrepareTrace(t)
+ .Add("delay", delay)
+ .Add("try-number", tryNumber)))
+ {
+ await Task.Delay(delay);
+ }
+ if (tryNumber % 10 == 0)
+ {
+ logger.LogWarning("Create Consumer Group If Not Exists: still waiting {info}", CurrentInfo());
+ }
+ }
+
+ #endregion // delay on retry
+
+ #region Validation (if key exists)
+
+ if (!await db.KeyExistsAsync(fullUri,
+ flags: CommandFlags.DemandMaster))
+ {
+ missingCounter.Add(1);
+ await Task.Delay(DELAY_ON_MISSING_KEY);
+ if (tryNumber == 0 || tryNumber > SPIN_LIMIT)
+ logger.LogDebug("Key not exists (yet): {info}", CurrentInfo());
+ continue;
+ }
+
+ #endregion // Validation (if key exists)
+
+ using (ETracer.StartInternalTrace("consumer.get-consumer-group-info",
+ t => PrepareTrace(t)
+ .Add("try-number", tryNumber)))
+ {
+ using var lk = await _lock.AcquireAsync(cancellationToken);
+ groupsInfo = await db.StreamGroupInfoAsync(
+ fullUri,
+ flags: CommandFlags.DemandMaster);
+ exists = groupsInfo.Any(m => m.Name == consumerGroup);
+ }
+ }
+ #region Exception Handling
+
+ catch (RedisServerException ex)
+ {
+ if (await db.KeyExistsAsync(fullUri,
+ flags: CommandFlags.DemandMaster))
+ {
+ logger.LogWarning(ex, "Create Consumer Group If Not Exists: failed. {info}", CurrentInfo());
+ }
+ else
+ {
+ logger.LogDebug(ex, "Create Consumer Group If Not Exists: failed. {info}", CurrentInfo());
+ }
+ }
+ catch (RedisConnectionException ex)
+ {
+ logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: connection failure. {info}", CurrentInfo());
+ }
+ catch (RedisTimeoutException ex)
+ {
+ logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: timeout failure. {info}", CurrentInfo());
+ }
+ catch (Exception ex)
+ {
+ logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: unexpected failure. {info}", CurrentInfo());
+ }
+
+ #endregion // Exception Handling
+ if (!exists)
+ {
+ try
+ {
+ using (ETracer.StartInternalTrace("consumer.create-consumer-group",
+ t => PrepareTrace(t)
+ .Add("try-number", tryNumber)))
+ {
+ using var lk = await _lock.AcquireAsync(cancellationToken);
+ if (await db.StreamCreateConsumerGroupAsync(fullUri,
+ consumerGroup,
+ StreamPosition.Beginning,
+ flags: CommandFlags.DemandMaster))
+ {
+ break;
+ }
+ }
+ }
+ #region Exception Handling
+
+ catch (RedisServerException ex)
+ {
+ logger.LogWarning(ex.FormatLazy(), $"""
+ {nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync:
+ failed & still waiting
+ {CurrentInfo()}
+ """);
+ }
+ catch (RedisConnectionException ex)
+ {
+ logger.LogWarning(ex.FormatLazy(), $"""
+ {nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync:
+ Connection failure
+ {CurrentInfo()}
+ """);
+ }
+ catch (ObjectDisposedException)
+ {
+ logger.LogWarning($"""
+ {nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync:
Connection might not be available
+ {CurrentInfo()}
+ """);
+ }
+
+ catch (Exception ex)
+ {
+ logger.LogWarning(ex, $"""
+ {nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync:
+ unexpected failure
+ {CurrentInfo()}
+ """);
+ }
+
+ #endregion // Exception Handling
+ }
+
+ #region string CurrentInfo()
+
+ string CurrentInfo() => @$"
+Try number: {tryNumber}
+Stream key: {uri}
+Consumer Group: {consumerGroup}
+Is Connected: {db.Multiplexer.IsConnected}
+Configuration: {db.Multiplexer.Configuration}
+";
+
+ #endregion // string CurrentInfo()
+ }
+
+
+ ITagAddition PrepareTrace(ITagAddition t) => t.Add("uri", uri)
+ .Add("env", env)
+ .Add("group-name", consumerGroup);
+ ICounterBuilder PrepareMeter(Counter t) => t.WithTag("uri", uri)
+ .WithTag("env", env)
+ .WithTag("group-name", consumerGroup);
+ }
+
+ #endregion // CreateConsumerGroupIfNotExistsAsync
+ }
+}
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs
new file mode 100644
index 00000000..01cb70b9
--- /dev/null
+++ b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs
@@ -0,0 +1,84 @@
+using EventSourcing.Backbone;
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+
+using static EventSourcing.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
+
+namespace Microsoft.Extensions.Configuration
+{
+ ///
+ /// The redis DI extensions.
+ ///
+ public static class RedisDiExtensions
+ {
+ ///
+ /// Adds the event source redis connection to the DI.
+ ///
+ /// The services.
+ /// An IServiceCollection.
+ public static IServiceCollection AddEventSourceRedisConnection(
+ this IServiceCollection services)
+ {
+ services.AddSingleton<IEventSourceRedisConnectionFactory, EventSourceRedisConnectionFactory>();
+
+ return services;
+ }
+
+ ///
+ /// Adds the event source redis connection to the DI.
+ ///
+ /// The services.
+ /// The raw endpoint (not an environment variable).
+ /// The password (not an environment variable).
+ ///
+ /// An IServiceCollection.
+ ///
+ public static IServiceCollection AddEventSourceRedisConnection(
+ this IServiceCollection services,
+ string endpoint,
+ string? password = null)
+ {
+ services.AddSingleton(
+ sp =>
+ {
+ ILogger logger = sp.GetService<ILogger<EventSourceRedisConnectionFactory>>() ??
+ throw new EventSourcingException(
+ $"{nameof(AddEventSourceRedisConnection)}: Cannot resolve a logger");
+
+ var factory = EventSourceRedisConnectionFactory.Create(logger, endpoint, password);
+ return factory;
+ });
+
+ return services;
+ }
+
+ ///
+ /// Adds the event source redis connection to the DI.
+ ///
+ /// The services.
+ /// The environment variable key of the endpoint.
+ /// The environment variable key of the password.
+ ///
+ /// An IServiceCollection.
+ ///
+ public static IServiceCollection AddEventSourceRedisConnectionFromEnv(
+ this IServiceCollection services,
+ string endpointEnvKey,
+ string passwordEnvKey = PASSWORD_KEY)
+ {
+ services.AddSingleton(
+ sp =>
+ {
+ ILogger logger = sp.GetService<ILogger<EventSourceRedisConnectionFactory>>() ??
+ throw new EventSourcingException(
+ $"{nameof(AddEventSourceRedisConnectionFromEnv)}: Cannot resolve a logger");
+
+ var factory = EventSourceRedisConnectionFactory.CreateFromEnv(logger, endpointEnvKey, passwordEnvKey);
+ return factory;
+ });
+
+ return services;
+ }
+ }
+}
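Putting the DI pieces together, a sketch of registering the shared connection and resolving a producer-channel builder from the container (the endpoint is a placeholder; note that, per this diff, the extension methods live in the Microsoft.Extensions.Configuration namespace):

```csharp
using EventSourcing.Backbone;
using Microsoft.Extensions.Configuration;         // AddEventSourceRedisConnection
using Microsoft.Extensions.DependencyInjection;

var services = new ServiceCollection();
services.AddLogging();                            // the factory lambda resolves an ILogger<>
services.AddEventSourceRedisConnection("localhost:6379");

using ServiceProvider provider = services.BuildServiceProvider();
IProducerIocStoreStrategyBuilder producer = provider.ResolveRedisProducerChannel();
```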
diff --git a/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/icon.png b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/REDIS/EventSourcing.Backbone.Channels.RedisProvider.Common/icon.png differ
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs
deleted file mode 100644
index 6d6ff9f5..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerChannel.cs
+++ /dev/null
@@ -1,1077 +0,0 @@
-using System.Diagnostics;
-using System.Net;
-using System.Runtime.CompilerServices;
-using System.Text.Json;
-
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-using Weknow.EventSource.Backbone.Building;
-using Weknow.EventSource.Backbone.Channels.RedisProvider.Common;
-using Weknow.EventSource.Backbone.Private;
-
-using static System.Math;
-using static Weknow.EventSource.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
-
-// TODO: [bnaya 2021-07] MOVE TELEMETRY TO THE BASE CLASSES OF PRODUCER / CONSUME
-
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
-{
- ///
- /// The redis consumer channel.
- ///
- internal class RedisConsumerChannel : IConsumerChannelProvider
- {
- private const string BEGIN_OF_STREAM = "0000000000000";
- ///
- /// Max delay
- ///
- private const int MAX_DELAY = 5000;
- ///
- /// The read by identifier chunk size.
- /// REDIS don't have option to read direct position (it read from a position, not includes the position itself),
- /// therefore read should start before the actual position.
- ///
- private const int READ_BY_ID_CHUNK_SIZE = 10;
- ///
- /// Receiver max iterations
- ///
- private const int READ_BY_ID_ITERATIONS = 1000 / READ_BY_ID_CHUNK_SIZE;
- private static readonly ActivitySource ACTIVITY_SOURCE = new ActivitySource(EventSourceConstants.REDIS_CONSUMER_CHANNEL_SOURCE);
-
- private readonly ILogger _logger;
- private readonly RedisConsumerChannelSetting _setting;
- private readonly IEventSourceRedisConnectionFacroty _connFactory;
- private readonly IConsumerStorageStrategy _defaultStorageStrategy;
- private const string META_SLOT = "____";
- private const int INIT_RELEASE_DELAY = 100;
- private const int MAX_RELEASE_DELAY = 1000 * 30; // 30 seconds
-
- #region Ctor
-
- ///
- /// Initializes a new instance.
- ///
- /// The redis provider promise.
- /// The logger.
- /// The setting.
- public RedisConsumerChannel(
- IEventSourceRedisConnectionFacroty redisConnFactory,
- ILogger logger,
- RedisConsumerChannelSetting? setting = null)
- {
- _logger = logger;
- _connFactory = redisConnFactory;
- _defaultStorageStrategy = new RedisHashStorageStrategy(redisConnFactory);
- _setting = setting ?? RedisConsumerChannelSetting.Default;
- }
-
- ///
- /// Initializes a new instance.
- ///
- /// The logger.
- /// The configuration.
- /// The setting.
- /// Environment keys of the credentials
- public RedisConsumerChannel(
- ILogger logger,
- Action? configuration = null,
- RedisConsumerChannelSetting? setting = null,
- RedisCredentialsKeys credentialsKeys = default) : this(
- new EventSourceRedisConnectionFacroty(
- logger,
- configuration,
- credentialsKeys),
- logger,
- setting)
- {
- }
-
- #endregion // Ctor
-
- #region SubsribeAsync
-
- ///
- /// Subscribe to the channel for specific metadata.
- ///
- /// The consumer plan.
- /// The function.
- /// The cancellation token.
- ///
- /// When completed
- ///
- public async ValueTask SubsribeAsync(
- IConsumerPlan plan,
- Func> func,
- CancellationToken cancellationToken)
- {
- var joinCancellationSource = CancellationTokenSource.CreateLinkedTokenSource(plan.Cancellation, cancellationToken);
- var joinCancellation = joinCancellationSource.Token;
- ConsumerOptions options = plan.Options;
-
- ILogger? logger = _logger ?? plan.Logger;
- logger.LogInformation("REDIS EVENT-SOURCE | SUBSCRIBE key: [{key}], consumer-group: [{consumer-group}], consumer-name: [{consumer-name}]", plan.Key(), plan.ConsumerGroup, plan.ConsumerName);
-
- while (!joinCancellation.IsCancellationRequested)
- {
- try
- {
- if (plan.Shard != string.Empty)
- await SubsribeShardAsync(plan, func, options, joinCancellation);
- else
- await SubsribePartitionAsync(plan, func, options, joinCancellation);
-
- if (options.FetchUntilUnixDateOrEmpty != null)
- break;
- }
- #region Exception Handling
-
- catch (OperationCanceledException)
- {
- if (_logger == null)
- Console.WriteLine($"Subscribe cancellation [{plan.Key()}] event stream (may have reach the messages limit)");
- else
- _logger.LogError("Subscribe cancellation [{partition}->{shard}] event stream (may have reach the messages limit)",
- plan.Partition, plan.Shard);
- joinCancellationSource.CancelSafe();
- }
- catch (Exception ex)
- {
- if (_logger == null)
- Console.WriteLine($"Fail to subscribe into the [{plan.Key()}] event stream");
- else
- _logger.LogError(ex, "Fail to subscribe into the [{partition}->{shard}] event stream",
- plan.Partition, plan.Shard);
- throw;
- }
-
- #endregion // Exception Handling
- }
- }
-
- #endregion // SubsribeAsync
-
- #region SubsribePartitionAsync
-
- ///
- /// Subscribe to all shards under a partition.
- ///
- /// The consumer plan.
- /// The function.
- /// The options.
- /// The cancellation token.
- ///
- /// When completed
- ///
- private async ValueTask SubsribePartitionAsync(
- IConsumerPlan plan,
- Func> func,
- ConsumerOptions options,
- CancellationToken cancellationToken)
- {
- var subscriptions = new Queue();
- int delay = 1;
- string partition = plan.Partition;
- int partitionSplit = partition.Length + 1;
- while (!cancellationToken.IsCancellationRequested)
- { // loop for error cases
- try
- {
- // infinite until cancellation (return unique shareds)
- var keys = GetKeysUnsafeAsync(pattern: $"{partition}:*")
- .WithCancellation(cancellationToken);
-
- await foreach (string key in keys)
- {
- string shard = key.Substring(partitionSplit);
- IConsumerPlan p = plan.WithShard(shard);
- // infinite task (until cancellation)
- Task subscription = SubsribeShardAsync(plan, func, options, cancellationToken);
- subscriptions.Enqueue(subscription);
- }
-
- break;
- }
- catch (Exception ex)
- {
- plan.Logger.LogError(ex, "Partition subscription");
- await DelayIfRetry();
- }
- }
-
- // run until cancellation or error
- await Task.WhenAll(subscriptions);
-
- #region DelayIfRetry
-
- async Task DelayIfRetry()
- {
- await Task.Delay(delay, cancellationToken);
- delay *= Max(delay, 2);
- delay = Min(MAX_DELAY, delay);
- }
-
- #endregion // DelayIfRetry
-
- }
-
- #endregion // SubsribePartitionAsync
-
- #region SubsribeShardAsync
-
- ///
- /// Subscribe to specific shard.
- ///
- /// The consumer plan.
- /// The function.
- /// The options.
- /// The cancellation token.
- private async Task SubsribeShardAsync(
- IConsumerPlan plan,
- Func> func,
- ConsumerOptions options,
- CancellationToken cancellationToken)
- {
- var claimingTrigger = options.ClaimingTrigger;
- var minIdleTime = (int)options.ClaimingTrigger.MinIdleTime.TotalMilliseconds;
-
- string key = plan.Key(); // $"{plan.Partition}:{plan.Shard}";
- bool isFirstBatchOrFailure = true;
-
- CommandFlags flags = CommandFlags.None;
- string? fetchUntil = options.FetchUntilUnixDateOrEmpty?.ToString();
-
- ILogger logger = plan.Logger ?? _logger;
-
- #region await db.CreateConsumerGroupIfNotExistsAsync(...)
-
- await _connFactory.CreateConsumerGroupIfNotExistsAsync(
- key,
- RedisChannelConstants.NONE_CONSUMER,
- logger);
-
- await _connFactory.CreateConsumerGroupIfNotExistsAsync(
- key,
- plan.ConsumerGroup,
- logger);
-
- #endregion // await db.CreateConsumerGroupIfNotExistsAsync(...)
-
- int releaseDelay = INIT_RELEASE_DELAY;
- int bachSize = options.BatchSize;
-
- TimeSpan delay = TimeSpan.Zero;
- int emptyBatchCount = 0;
- while (!cancellationToken.IsCancellationRequested && await HandleBatchAsync())
- {
- }
-
- #region HandleBatchAsync
-
- // Handle single batch
- async ValueTask HandleBatchAsync()
- {
- var policy = _setting.Policy.Policy;
- return await policy.ExecuteAsync(HandleBatchBreakerAsync, cancellationToken);
- }
-
-
- async Task HandleBatchBreakerAsync(CancellationToken ct)
- {
- ct.ThrowIfCancellationRequested();
-
- StreamEntry[] results = await ReadBatchAsync();
- emptyBatchCount = results.Length == 0 ? emptyBatchCount + 1 : 0;
- results = await ClaimStaleMessages(emptyBatchCount, results, ct);
-
- if (results.Length == 0)
- {
- if (fetchUntil == null)
- delay = await DelayIfEmpty(delay, cancellationToken);
- return fetchUntil == null;
- }
-
- ct.ThrowIfCancellationRequested();
-
- try
- {
- var batchCancellation = new CancellationTokenSource();
- int i = 0;
- batchCancellation.Token.Register(async () =>
- {
- RedisValue[] freeTargets = results[i..].Select(m => m.Id).ToArray();
- await ReleaseAsync(freeTargets);
- });
- for (; i < results.Length && !batchCancellation.IsCancellationRequested; i++)
- {
- StreamEntry result = results[i];
-
- #region Metadata meta = ...
-
- Dictionary channelMeta = result.Values.ToDictionary(m => m.Name, m => m.Value);
- Metadata meta;
- string? metaJson = channelMeta[META_SLOT];
- string eventKey = ((string?)result.Id) ?? throw new ArgumentException(nameof(MetadataExtensions.Empty.EventKey));
- if (string.IsNullOrEmpty(metaJson))
- { // backward comparability
-
- string channelType = ((string?)channelMeta[nameof(MetadataExtensions.Empty.ChannelType)]) ?? throw new ArgumentNullException(nameof(MetadataExtensions.Empty.ChannelType));
-
- if (channelType != CHANNEL_TYPE)
- {
- // TODO: [bnaya 2021-07] send metrics
- logger.LogWarning($"{nameof(RedisConsumerChannel)} [{CHANNEL_TYPE}] omit handling message of type '{channelType}'");
- await AckAsync(result.Id);
- continue;
- }
-
- string id = ((string?)channelMeta[nameof(MetadataExtensions.Empty.MessageId)]) ?? throw new ArgumentNullException(nameof(MetadataExtensions.Empty.MessageId));
- string operation = ((string?)channelMeta[nameof(MetadataExtensions.Empty.Operation)]) ?? throw new ArgumentNullException(nameof(MetadataExtensions.Empty.Operation));
- long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
- DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
- if (fetchUntil != null && string.Compare(fetchUntil, result.Id) < 0)
- return false;
- meta = new Metadata
- {
- MessageId = id,
- EventKey = eventKey,
- Environment = plan.Environment,
- Partition = plan.Partition,
- Shard = plan.Shard,
- Operation = operation,
- ProducedAt = producedAt
- };
-
- }
- else
- {
- //byte[] metabytes = Convert.FromBase64String(meta64);
- //string metaJson = Encoding.UTF8.GetString(metabytes);
- meta = JsonSerializer.Deserialize(metaJson, EventSourceOptions.FullSerializerOptions) ?? throw new ArgumentNullException(nameof(Metadata)); //, EventSourceJsonContext..Metadata);
- meta = meta with { EventKey = eventKey };
-
- }
-
- #endregion // Metadata meta = ...
-
- int local = i;
- var cancellableIds = results[local..].Select(m => m.Id);
- var ack = new AckOnce(
- () => AckAsync(result.Id),
- plan.Options.AckBehavior, logger,
- async () =>
- {
- batchCancellation.CancelSafe(); // cancel forward
- await CancelAsync(cancellableIds);
- });
-
- #region OriginFilter
-
- MessageOrigin originFilter = plan.Options.OriginFilter;
- if (originFilter != MessageOrigin.None && (originFilter & meta.Origin) == MessageOrigin.None)
- {
- Ack.Set(ack);
- #region Log
-
- _logger.LogInformation("Event Source skip consuming of event [{event-key}] because if origin is [{origin}] while the origin filter is sets to [{origin-filter}], Operation:[{operation}], Stream:[{stream}]", meta.EventKey, meta.Origin, originFilter, meta.Operation, meta.Key());
-
- #endregion // Log
- continue;
- }
-
- #endregion // OriginFilter
-
- #region var announcement = new Announcement(...)
-
- Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
- Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
-
- var announcement = new Announcement
- {
- Metadata = meta,
- Segments = segmets,
- InterceptorsData = interceptions
- };
-
- #endregion // var announcement = new Announcement(...)
-
- #region Start Telemetry Span
-
- ActivityContext parentContext = meta.ExtractSpan(channelMeta, ExtractTraceContext);
- // Start an activity with a name following the semantic convention of the OpenTelemetry messaging specification.
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md#span-name
- var activityName = $"{meta.Operation} consume";
-
- bool traceAsParent = (DateTimeOffset.UtcNow - meta.ProducedAt) < plan.Options.TraceAsParent;
- ActivityContext parentActivityContext = traceAsParent ? parentContext : default;
- using var activity = ACTIVITY_SOURCE.StartActivity(
- activityName,
- ActivityKind.Consumer,
- parentActivityContext, links: new[] { new ActivityLink(parentContext) });
- meta.InjectTelemetryTags(activity);
-
- #region IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
-
- IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
- {
- try
- {
- if (entries.TryGetValue(key, out var value))
- {
- if (string.IsNullOrEmpty(value))
- return Array.Empty<string>();
- return new[] { value.ToString() };
- }
- }
- #region Exception Handling
-
- catch (Exception ex)
- {
- Exception err = ex.FormatLazy();
- _logger.LogError(err, "Failed to extract trace context: {error}", err);
- }
-
- #endregion // Exception Handling
-
- return Enumerable.Empty<string>();
- }
-
- #endregion // IEnumerable<string> ExtractTraceContext(Dictionary<RedisValue, RedisValue> entries, string key)
-
- #endregion // Start Telemetry Span
-
- bool succeed = await func(announcement, ack);
- if (succeed)
- {
- releaseDelay = INIT_RELEASE_DELAY;
- bachSize = options.BatchSize;
- }
- else
- {
- if (options.PartialBehavior == Enums.PartialConsumerBehavior.Sequential)
- {
- RedisValue[] freeTargets = results[i..].Select(m => m.Id).ToArray();
- await ReleaseAsync(freeTargets);
- await Task.Delay(1000, ct);
- }
- }
- }
- }
- catch
- {
- isFirstBatchOrFailure = true;
- }
- return true;
- }
-
- #endregion // HandleBatchAsync
-
- #region ReadBatchAsync
-
- // read batch entities from REDIS
- async Task<StreamEntry[]> ReadBatchAsync()
- {
- // TBD: circuit-breaker
- try
- {
- var r = await _setting.Policy.Policy.ExecuteAsync(async (ct) =>
- {
- ct.ThrowIfCancellationRequested();
- StreamEntry[] values = Array.Empty<StreamEntry>();
- values = await ReadSelfPending();
-
- if (values.Length == 0)
- {
- isFirstBatchOrFailure = false;
-
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
-
- values = await db.StreamReadGroupAsync(
- key,
- plan.ConsumerGroup,
- plan.ConsumerName,
- position: StreamPosition.NewMessages,
- count: bachSize,
- flags: flags)
- .WithCancellation(ct, () => Array.Empty<StreamEntry>())
- .WithCancellation(cancellationToken, () => Array.Empty<StreamEntry>());
- }
- StreamEntry[] results = values ?? Array.Empty<StreamEntry>();
- return results;
- }, cancellationToken);
- return r;
- }
- #region Exception Handling
-
- catch (RedisTimeoutException ex)
- {
- logger.LogWarning(ex, "Event source [{source}] by [{consumer}]: Timeout", key, plan.ConsumerName);
- return Array.Empty<StreamEntry>();
- }
- catch (Exception ex)
- {
- logger.LogError(ex, "Fail to read from event source [{source}] by [{consumer}]", key, plan.ConsumerName);
- return Array.Empty<StreamEntry>();
- }
-
- #endregion // Exception Handling
- }
-
- #endregion // ReadBatchAsync
-
- #region ReadSelfPending
-
- // Check for pending messages of the current consumer (crash scenario)
- async Task<StreamEntry[]> ReadSelfPending()
- {
- StreamEntry[] values = Array.Empty<StreamEntry>();
- if (!isFirstBatchOrFailure)
- return values;
-
-
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- StreamPendingMessageInfo[] pendMsgInfo = await db.StreamPendingMessagesAsync(
- key,
- plan.ConsumerGroup,
- options.BatchSize,
- plan.ConsumerName,
- flags: CommandFlags.DemandMaster);
- if (pendMsgInfo != null && pendMsgInfo.Length != 0)
- {
- var ids = pendMsgInfo
- .Select(m => m.MessageId).ToArray();
- if (ids.Length != 0)
- {
- values = await db.StreamClaimAsync(key,
- plan.ConsumerGroup,
- plan.ConsumerName,
- 0,
- ids,
- flags: CommandFlags.DemandMaster);
- values = values ?? Array.Empty<StreamEntry>();
- _logger.LogInformation("Claimed messages: {ids}", ids);
- }
- }
-
- return values;
- }
-
- #endregion // ReadSelfPending
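// Illustrative sketch (not part of the deleted file above): the crash-recovery pattern that
// ReadSelfPending implements -- XPENDING for the current consumer, then XCLAIM with min-idle-time 0
// so the same consumer re-reads its own un-acknowledged entries. Identifiers below are examples.
async Task<StreamEntry[]> RecoverOwnPendingAsync(IDatabaseAsync db, RedisKey stream, string group, string consumer, int batchSize)
{
    StreamPendingMessageInfo[] pending = await db.StreamPendingMessagesAsync(
        stream, group, batchSize, consumer, flags: CommandFlags.DemandMaster);
    if (pending.Length == 0)
        return Array.Empty<StreamEntry>();
    RedisValue[] ids = pending.Select(p => p.MessageId).ToArray();
    // min-idle-time 0: claim back immediately, regardless of how long the entries have waited
    return await db.StreamClaimAsync(stream, group, consumer, 0, ids, flags: CommandFlags.DemandMaster);
}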
-
- #region ClaimStaleMessages
-
- // Taking work from other consumers which have long-standing pending messages
- async Task<StreamEntry[]> ClaimStaleMessages(
- int emptyBatchCount,
- StreamEntry[] values,
- CancellationToken ct)
- {
- var logger = plan.Logger ?? _logger;
- ct.ThrowIfCancellationRequested();
- if (values.Length != 0) return values;
- if (emptyBatchCount < claimingTrigger.EmptyBatchCount)
- return values;
- try
- {
- IDatabaseAsync db = await _connFactory.GetDatabaseAsync();
- StreamPendingInfo pendingInfo = await db.StreamPendingAsync(key, plan.ConsumerGroup, flags: CommandFlags.DemandMaster);
- foreach (var c in pendingInfo.Consumers)
- {
- var self = c.Name == plan.ConsumerName;
- if (self) continue;
- try
- {
- var pendMsgInfo = await db.StreamPendingMessagesAsync(
- key,
- plan.ConsumerGroup,
- 10,
- c.Name,
- pendingInfo.LowestPendingMessageId,
- pendingInfo.HighestPendingMessageId,
- flags: CommandFlags.DemandMaster);
-
-
-
- RedisValue[] ids = pendMsgInfo
- .Where(x => x.IdleTimeInMilliseconds > minIdleTime)
- .Select(m => m.MessageId).ToArray();
- if (ids.Length == 0)
- continue;
-
- #region Log
- logger.LogInformation("Event Source Consumer [{name}]: Claimed {count} messages, from Consumer [{name}]", plan.ConsumerName, c.PendingMessageCount, c.Name);
-
- #endregion // Log
-
- // will claim messages only if older than _setting.ClaimingTrigger.MinIdleTime
- values = await db.StreamClaimAsync(key,
- plan.ConsumerGroup,
- c.Name,
- minIdleTime,
- ids,
- flags: CommandFlags.DemandMaster);
- if (values.Length != 0)
- logger.LogInformation("Event Source Consumer [{name}]: Claimed {count} messages, from Consumer [{name}]", plan.ConsumerName, c.PendingMessageCount, c.Name);
- }
- #region Exception Handling
-
- catch (RedisTimeoutException ex)
- {
- logger.LogWarning(ex, "Timeout (handle pending): {name}{self}", c.Name, self);
- continue;
- }
-
- catch (Exception ex)
- {
- logger.LogError(ex, "Fail to claim pending: {name}{self}", c.Name, self);
- }
-
- #endregion // Exception Handling
-
- if (values != null && values.Length != 0)
- return values;
- }
- }
- #region Exception Handling
-
- catch (RedisConnectionException ex)
- {
- _logger.LogWarning(ex, "Fail to claim REDIS's pending");
- }
-
- catch (Exception ex)
- {
- _logger.LogError(ex, "Fail to claim pending");
- }
-
- #endregion // Exception Handling
-
- return Array.Empty<StreamEntry>();
- }
-
- #endregion // ClaimStaleMessages
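// Illustrative sketch (not part of the deleted file above): the "steal stale work" pattern used by
// ClaimStaleMessages. Entries pending on *other* consumers are claimed only when they have been idle
// longer than a threshold; XCLAIM re-checks min-idle-time server side, so two consumers racing for
// the same entry cannot both win it. Identifiers below are examples.
async Task<StreamEntry[]> StealStaleAsync(IDatabaseAsync db, RedisKey stream, string group, string self, long minIdleMs)
{
    StreamPendingInfo info = await db.StreamPendingAsync(stream, group, flags: CommandFlags.DemandMaster);
    foreach (StreamConsumerInfo other in info.Consumers.Where(c => c.Name != self))
    {
        StreamPendingMessageInfo[] pending = await db.StreamPendingMessagesAsync(
            stream, group, 10, other.Name, flags: CommandFlags.DemandMaster);
        RedisValue[] ids = pending
            .Where(p => p.IdleTimeInMilliseconds > minIdleMs)
            .Select(p => p.MessageId)
            .ToArray();
        if (ids.Length == 0)
            continue;
        StreamEntry[] claimed = await db.StreamClaimAsync(stream, group, self, minIdleMs, ids, flags: CommandFlags.DemandMaster);
        if (claimed.Length != 0)
            return claimed;
    }
    return Array.Empty<StreamEntry>();
}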
-
- #region AckAsync
-
- // Acknowledge event handling (prevent re-consuming of the message).
- async ValueTask AckAsync(RedisValue messageId)
- {
- try
- {
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- // release the event (won't handle again in the future)
- long id = await db.StreamAcknowledgeAsync(key,
- plan.ConsumerGroup,
- messageId,
- flags: CommandFlags.DemandMaster);
- }
- catch (Exception)
- { // TODO: [bnaya 2020-10] do better handling (re-throw / swallow + reason) currently logged at the wrapping class
- throw;
- }
- }
-
- #endregion // AckAsync
-
- #region CancelAsync
-
- // Cancels the asynchronous.
- ValueTask CancelAsync(IEnumerable<RedisValue> messageIds)
- {
- // no way to release consumed item back to the stream
- //try
- //{
- // // release the event (won't handle again in the future)
- // await db.StreamClaimIdsOnlyAsync(key,
- // plan.ConsumerGroup,
- // RedisValue.Null,
- // 0,
- // messageIds.ToArray(),
- // flags: CommandFlags.DemandMaster);
- //}
- //catch (Exception)
- //{ // TODO: [bnaya 2020-10] do better handling (re-throw / swallow + reason) currently logged at the wrapping class
- // throw;
- //}
- return ValueTask.CompletedTask;
-
- }
-
- #endregion // CancelAsync
-
- #region ReleaseAsync
-
-
- // Releases the messages (work around).
- async Task ReleaseAsync(RedisValue[] freeTargets)
- {
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- await db.StreamClaimAsync(plan.Key(),
- plan.ConsumerGroup,
- RedisChannelConstants.NONE_CONSUMER,
- 1,
- freeTargets,
- flags: CommandFlags.DemandMaster);
- await Task.Delay(releaseDelay, cancellationToken);
- if (releaseDelay < MAX_RELEASE_DELAY)
- releaseDelay = Math.Min(releaseDelay * 2, MAX_RELEASE_DELAY);
-
- if (bachSize == options.BatchSize)
- bachSize = 1;
- else
- bachSize = Math.Min(bachSize * 2, options.BatchSize);
- }
-
- #endregion // ReleaseAsync
- }
-
- #endregion // SubsribeShardAsync
-
- #region GetByIdAsync
-
- /// <summary>
- /// Gets announcement data by id.
- /// </summary>
- /// <param name="entryId">The entry identifier.</param>
- /// <param name="plan">The plan.</param>
- /// <param name="cancellationToken">The cancellation token.</param>
- /// <returns></returns>
- /// <exception cref="KeyNotFoundException">IConsumerChannelProvider.GetAsync of [{entryId}] from [{plan.Partition}->{plan.Shard}] return nothing.</exception>
- /// <exception cref="KeyNotFoundException">IConsumerChannelProvider.GetAsync of [{entryId}] from [{plan.Partition}->{plan.Shard}] was expecting single result but got [{entries.Length}] results</exception>
- async ValueTask<Announcement> IConsumerChannelProvider.GetByIdAsync(
- EventKey entryId,
- IConsumerPlan plan,
- CancellationToken cancellationToken)
- {
- string mtdName = $"{nameof(IConsumerChannelProvider)}.{nameof(IConsumerChannelProvider.GetByIdAsync)}";
-
- try
- {
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- ILogger logger = plan.Logger;
- StreamEntry entry = await FindAsync(entryId);
-
- #region var announcement = new Announcement(...)
-
- Dictionary<RedisValue, RedisValue> channelMeta = entry.Values.ToDictionary(m => m.Name, m => m.Value);
- string channelType = GetMeta(nameof(MetadataExtensions.Empty.ChannelType));
- string id = GetMeta(nameof(MetadataExtensions.Empty.MessageId));
- string operation = GetMeta(nameof(MetadataExtensions.Empty.Operation));
- long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
-
- #region string GetMeta(string propKey)
-
- string GetMeta(string propKey)
- {
- string? result = channelMeta[propKey];
- if (result == null) throw new ArgumentNullException(propKey);
- return result;
- }
-
- #endregion // string GetMeta(string propKey)
-
- DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
-#pragma warning disable CS8601 // Possible null reference assignment.
- var meta = new Metadata
- {
- MessageId = id,
- EventKey = entry.Id,
- Environment = plan.Environment,
- Partition = plan.Partition,
- Shard = plan.Shard,
- Operation = operation,
- ProducedAt = producedAt,
- ChannelType = channelType
- };
-#pragma warning restore CS8601 // Possible null reference assignment.
-
- Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
- Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
-
- var announcement = new Announcement
- {
- Metadata = meta,
- Segments = segmets,
- InterceptorsData = interceptions
- };
-
- #endregion // var announcement = new Announcement(...)
-
- return announcement;
-
- #region FindAsync
-
- async Task<StreamEntry> FindAsync(EventKey entryId)
- {
- string lookForId = (string)entryId;
- string key = plan.Key();
-
- string originId = lookForId;
- int len = originId.IndexOf('-');
- string fromPrefix = originId.Substring(0, len);
- long start = long.Parse(fromPrefix);
- string startPosition = (start - 1).ToString();
- int iteration = 0;
- for (int i = 0; i < READ_BY_ID_ITERATIONS; i++) // up to 1000 items
- {
- iteration++;
- StreamEntry[] entries = await db.StreamReadAsync(
- key,
- startPosition,
- READ_BY_ID_CHUNK_SIZE,
- CommandFlags.DemandMaster);
- if (entries.Length == 0)
- throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{key}] return nothing, start at ({startPosition}, iteration = {iteration}).");
- string k = string.Empty;
- foreach (StreamEntry e in entries)
- {
-#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
-#pragma warning disable CS8602 // Dereference of a possibly null reference.
- k = e.Id;
- string ePrefix = k.Substring(0, len);
-#pragma warning restore CS8602 // Dereference of a possibly null reference.
-#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
- long comp = long.Parse(ePrefix);
- if (comp < start)
- continue; // not there yet
- if (k == lookForId)
- {
- return e;
- }
- if (ePrefix != fromPrefix)
- throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{key}] return not exists.");
- }
- startPosition = k; // next batch will start from last entry
- }
- throw new KeyNotFoundException($"{mtdName} of [{lookForId}] from [{key}] return not found.");
- }
-
- #endregion // FindAsync
- }
- #region Exception Handling
-
- catch (Exception ex)
- {
- string key = plan.Key();
- _logger.LogError(ex.FormatLazy(), "{mtd} Failed: Entry [{entryId}] from [{key}] event stream",
- mtdName, entryId, key);
- throw;
- }
-
- #endregion // Exception Handling
- }
-
- #endregion // GetByIdAsync
-
- #region GetAsyncEnumerable
-
- /// <summary>
- /// Gets asynchronous enumerable of announcements.
- /// </summary>
- /// <param name="plan">The plan.</param>
- /// <param name="options">The options.</param>
- /// <param name="cancellationToken">The cancellation token.</param>
- /// <returns></returns>
- async IAsyncEnumerable<Announcement> IConsumerChannelProvider.GetAsyncEnumerable(
- IConsumerPlan plan,
- ConsumerAsyncEnumerableOptions? options,
- [EnumeratorCancellation] CancellationToken cancellationToken)
- {
- string mtdName = $"{nameof(IConsumerChannelProvider)}.{nameof(IConsumerChannelProvider.GetAsyncEnumerable)}";
-
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- ILogger logger = plan.Logger;
- var loop = AsyncLoop().WithCancellation(cancellationToken);
- await foreach (StreamEntry entry in loop)
- {
- if (cancellationToken.IsCancellationRequested) yield break;
-
- #region var announcement = new Announcement(...)
-
- Dictionary<RedisValue, RedisValue> channelMeta = entry.Values.ToDictionary(m => m.Name, m => m.Value);
-#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
-#pragma warning disable CS8601 // Possible null reference assignment.
- string channelType = channelMeta[nameof(MetadataExtensions.Empty.ChannelType)];
- string id = channelMeta[nameof(MetadataExtensions.Empty.MessageId)];
- string operation = channelMeta[nameof(MetadataExtensions.Empty.Operation)];
- long producedAtUnix = (long)channelMeta[nameof(MetadataExtensions.Empty.ProducedAt)];
- DateTimeOffset producedAt = DateTimeOffset.FromUnixTimeSeconds(producedAtUnix);
- var meta = new Metadata
- {
- MessageId = id,
- EventKey = entry.Id,
- Environment = plan.Environment,
- Partition = plan.Partition,
- Shard = plan.Shard,
- Operation = operation,
- ProducedAt = producedAt
- };
-#pragma warning restore CS8601 // Possible null reference assignment.
-#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
- var filter = options?.OperationFilter;
- if (filter != null && !filter(meta))
- continue;
-
- Bucket segmets = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Segments);
- Bucket interceptions = await GetBucketAsync(plan, channelMeta, meta, EventBucketCategories.Interceptions);
-
- var announcement = new Announcement
- {
- Metadata = meta,
- Segments = segmets,
- InterceptorsData = interceptions
- };
-
- #endregion // var announcement = new Announcement(...)
-
- yield return announcement;
- };
-
- #region AsyncLoop
-
- async IAsyncEnumerable<StreamEntry> AsyncLoop()
- {
- string key = plan.Key();
-
- int iteration = 0;
- RedisValue startPosition = options?.From ?? BEGIN_OF_STREAM;
- TimeSpan delay = TimeSpan.Zero;
- while (true)
- {
- if (cancellationToken.IsCancellationRequested) yield break;
-
- iteration++;
- StreamEntry[] entries = await db.StreamReadAsync(
- key,
- startPosition,
- READ_BY_ID_CHUNK_SIZE,
- CommandFlags.DemandMaster);
- if (entries.Length == 0)
- {
- if (options?.ExitWhenEmpty ?? true) yield break;
- delay = await DelayIfEmpty(delay, cancellationToken);
- }
- string k = string.Empty;
- foreach (StreamEntry e in entries)
- {
- if (cancellationToken.IsCancellationRequested) yield break;
-
-#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
- k = e.Id;
-#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
- if (options?.To != null && string.Compare(options?.To, k) < 0)
- yield break;
- yield return e;
- }
- startPosition = k; // next batch will start from last entry
- }
- }
-
- #endregion // AsyncLoop
- }
-
- #endregion // GetAsyncEnumerable
-
- #region ValueTask<Bucket> GetBucketAsync(StorageType storageType) // local function
-
- /// <summary>
- /// Gets a data bucket.
- /// </summary>
- /// <param name="plan">The plan.</param>
- /// <param name="channelMeta">The channel meta.</param>
- /// <param name="meta">The meta.</param>
- /// <param name="storageType">Type of the storage.</param>
- /// <returns></returns>
- private async ValueTask<Bucket> GetBucketAsync(
- IConsumerPlan plan,
- Dictionary<RedisValue, RedisValue> channelMeta,
- Metadata meta,
- EventBucketCategories storageType)
- {
-
- IEnumerable strategies = await plan.StorageStrategiesAsync;
- strategies = strategies.Where(m => m.IsOfTargetType(storageType));
- Bucket bucket = Bucket.Empty;
- if (strategies.Any())
- {
- foreach (var strategy in strategies)
- {
- bucket = await strategy.LoadBucketAsync(meta, bucket, storageType, LocalGetProperty);
- }
- }
- else
- {
- bucket = await _defaultStorageStrategy.LoadBucketAsync(meta, bucket, storageType, LocalGetProperty);
- }
-
- return bucket;
-
-#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
-#pragma warning disable CS8603 // Possible null reference return.
- string LocalGetProperty(string k) => (string)channelMeta[k];
-#pragma warning restore CS8603 // Possible null reference return.
-#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
- }
-
- #endregion // ValueTask<Bucket> GetBucketAsync(StorageType storageType) // local function
-
- #region DelayIfEmpty
-
- // avoiding system hit when empty (mitigation of self DDoS)
- private async Task<TimeSpan> DelayIfEmpty(TimeSpan previousDelay, CancellationToken cancellationToken)
- {
- var cfg = _setting.DelayWhenEmptyBehavior;
- var newDelay = cfg.CalcNextDelay(previousDelay);
- var limitDelay = Min(cfg.MaxDelay.TotalMilliseconds, newDelay.TotalMilliseconds);
- newDelay = TimeSpan.FromMilliseconds(limitDelay);
- await Task.Delay(newDelay, cancellationToken);
- return newDelay;
- }
-
- #endregion // DelayIfEmpty
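// Illustrative sketch (not part of the deleted file above): one plausible shape for the CalcNextDelay
// used by DelayIfEmpty -- grow the previous delay and cap it at MaxDelay, so an idle stream backs off
// instead of polling aggressively. The initial value and growth factor here are assumptions.
static TimeSpan CalcNextDelaySketch(TimeSpan previous, TimeSpan maxDelay)
{
    double nextMs = previous == TimeSpan.Zero
        ? 100                                  // assumed initial delay (milliseconds)
        : previous.TotalMilliseconds * 2;      // assumed growth factor
    return TimeSpan.FromMilliseconds(Math.Min(nextMs, maxDelay.TotalMilliseconds));
}
// e.g. 0ms -> 100ms -> 200ms -> 400ms -> ... -> maxDelay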
-
- #region GetKeysUnsafeAsync
-
- /// <summary>
- /// Gets the keys unsafe asynchronous.
- /// </summary>
- /// <param name="pattern">The pattern.</param>
- /// <param name="cancellationToken">The cancellation token.</param>
- /// <returns></returns>
- public async IAsyncEnumerable<string> GetKeysUnsafeAsync(
- string pattern,
- [EnumeratorCancellation] CancellationToken cancellationToken = default)
- {
- IConnectionMultiplexer multiplexer = await _connFactory.GetAsync();
- var distict = new HashSet<string>();
- while (!cancellationToken.IsCancellationRequested)
- {
- foreach (EndPoint endpoint in multiplexer.GetEndPoints())
- {
- IServer server = multiplexer.GetServer(endpoint);
- // TODO: [bnaya 2020_09] check the pagination behavior
-#pragma warning disable CS8600 // Converting null literal or possible null value to non-nullable type.
-#pragma warning disable CS8604 // Possible null reference argument.
- await foreach (string key in server.KeysAsync(pattern: pattern))
- {
- if (distict.Contains(key))
- continue;
- distict.Add(key);
- yield return key;
- }
-#pragma warning restore CS8604 // Possible null reference argument.
-#pragma warning restore CS8600 // Converting null literal or possible null value to non-nullable type.
- }
- }
- }
-
- #endregion // GetKeysUnsafeAsync
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerProviderExtensions.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerProviderExtensions.cs
deleted file mode 100644
index ca951683..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/RedisConsumerProviderExtensions.cs
+++ /dev/null
@@ -1,115 +0,0 @@
-using Microsoft.Extensions.Configuration;
-using Microsoft.Extensions.DependencyInjection;
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-using Weknow.EventSource.Backbone.Building;
-using Weknow.EventSource.Backbone.Channels.RedisProvider;
-
-namespace Weknow.EventSource.Backbone
-{
- public static class RedisConsumerProviderExtensions
- {
- /// <summary>
- /// Uses REDIS consumer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="setting">The setting.</param>
- /// <param name="redisConfiguration">The redis configuration.</param>
- /// <param name="credentialsKeys">Environment keys of the credentials</param>
- /// <returns></returns>
- public static IConsumerStoreStrategyBuilder UseRedisChannel(
- this IConsumerBuilder builder,
- Func<RedisConsumerChannelSetting, RedisConsumerChannelSetting> setting,
- Action<ConfigurationOptions>? redisConfiguration = null,
- RedisCredentialsKeys credentialsKeys = default)
- {
- var stg = setting?.Invoke(RedisConsumerChannelSetting.Default);
- var channelBuilder = builder.UseChannel(LocalCreate);
- return channelBuilder;
-
- IConsumerChannelProvider LocalCreate(ILogger logger)
- {
- var channel = new RedisConsumerChannel(
- logger,
- redisConfiguration,
- stg,
- credentialsKeys);
- return channel;
- }
- }
-
- /// <summary>
- /// Uses REDIS consumer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="setting">The setting.</param>
- /// <param name="credentialsKeys">Environment keys of the credentials</param>
- /// <returns></returns>
- public static IConsumerStoreStrategyBuilder UseRedisChannel(
- this IConsumerBuilder builder,
- RedisConsumerChannelSetting? setting = null,
- RedisCredentialsKeys credentialsKeys = default)
- {
- var cfg = setting ?? RedisConsumerChannelSetting.Default;
- var channelBuilder = builder.UseChannel(LocalCreate);
- return channelBuilder;
-
- IConsumerChannelProvider LocalCreate(ILogger logger)
- {
- var channel = new RedisConsumerChannel(
- logger,
- setting: cfg,
- credentialsKeys: credentialsKeys);
- return channel;
- }
- }
-
- /// <summary>
- /// Uses REDIS consumer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="redisClientFactory">The redis client factory.</param>
- /// <param name="setting">The setting.</param>
- /// <returns></returns>
- public static IConsumerStoreStrategyBuilder UseRedisChannel(
- this IConsumerBuilder builder,
- IEventSourceRedisConnectionFacroty redisClientFactory,
- RedisConsumerChannelSetting? setting = null)
- {
- var cfg = setting ?? RedisConsumerChannelSetting.Default;
- var channelBuilder = builder.UseChannel(LocalCreate);
- return channelBuilder;
-
- IConsumerChannelProvider LocalCreate(ILogger logger)
- {
- var channel = new RedisConsumerChannel(
- redisClientFactory,
- logger,
- setting);
- return channel;
- }
- }
-
- /// <summary>
- /// Uses REDIS consumer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="serviceProvider">The service provider.</param>
- /// <param name="setting">The setting.</param>
- /// <returns></returns>
- /// <exception cref="RedisConnectionException">redisClient</exception>
- public static IConsumerStoreStrategyBuilder UseRedisChannelInjection(
- this IConsumerBuilder builder,
- IServiceProvider serviceProvider,
- RedisConsumerChannelSetting? setting = null)
- {
- var connFactory = serviceProvider.GetService<IEventSourceRedisConnectionFacroty>();
- if (connFactory == null)
- throw new RedisConnectionException(ConnectionFailureType.None, $"{nameof(IEventSourceRedisConnectionFacroty)} is not registerd, use services.{nameof(RedisDiExtensions.AddEventSourceRedisConnection)} in order to register it at Setup stage.");
- return builder.UseRedisChannel(connFactory, setting);
- }
-
- }
-}
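// Illustrative usage (not part of the deleted file above): wiring the consumer channel through DI with
// the extension methods above. 'services', 'serviceProvider' and 'consumerBuilder' are placeholders
// from the host application; AddEventSourceRedisConnection is the registration named in the exception
// message above (assumed parameterless here).
services.AddEventSourceRedisConnection();                        // setup stage: registers IEventSourceRedisConnectionFacroty
IConsumerStoreStrategyBuilder consumerChannel =
    consumerBuilder.UseRedisChannelInjection(serviceProvider);   // resolves the shared connection factory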
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider.csproj b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider.csproj
deleted file mode 100644
index 32920e42..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider.csproj
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProviderr.xml b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProviderr.xml
deleted file mode 100644
index 6c625420..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/Weknow.EventSource.Backbone.Channels.RedisConsumerProviderr.xml
+++ /dev/null
@@ -1,286 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.RedisConsumerProvider
-
-
-
-
- The read by identifier chunk size.
- REDIS don't have option to read direct position (it read from a position, not includes the position itself),
- therefore read should start before the actual position.
-
-
-
-
- Initializes a new instance.
-
- The redis provider promise.
- The logger.
- The setting.
-
-
-
- Initializes a new instance.
-
- The redis database.
- The logger.
- The setting.
-
-
-
- Initializes a new instance.
-
- The logger.
- The configuration.
- The setting.
- The endpoint env key.
- The password env key.
-
-
-
- Subscribe to the channel for specific metadata.
-
- The consumer plan.
- The function.
- The options.
- The cancellation token.
-
- When completed
-
-
-
-
- Subscribe to all shards under a partition.
-
- The database.
- The consumer plan.
- The function.
- The options.
- The cancellation token.
-
- When completed
-
-
-
-
- Subscribe to specific shard.
-
- The database.
- The consumer plan.
- The function.
- The options.
- The cancellation token.
-
-
-
- Gets announcement data by id.
-
- The entry identifier.
- The plan.
- The cancellation token.
-
- IConsumerChannelProvider.GetAsync of [{entryId}] from [{plan.Partition}->{plan.Shard}] return nothing.
- IConsumerChannelProvider.GetAsync of [{entryId}] from [{plan.Partition}->{plan.Shard}] was expecting single result but got [{entries.Length}] results
-
-
-
- Gets a data bucket.
-
- The plan.
- The channel meta.
- The meta.
- Type of the storage.
-
-
-
-
- Gets the keys unsafe asynchronous.
-
- The pattern.
- The cancellation token.
-
-
-
-
- Behavior of delay when empty
-
-
-
-
- Gets or sets the maximum delay.
-
-
-
-
- Gets or sets the next delay.
-
-
-
-
- Represent specific setting of the consumer channel
-
-
-
-
- Define when to claim stale (long waiting) messages from other consumers
-
-
-
-
- Gets or sets the resilience policy.
-
-
-
-
- Behavior of delay when empty
-
-
-
-
- Performs an implicit conversion.
-
- The policy.
-
- The result of the conversion.
-
-
-
-
- Define when to claim stale (long waiting) messages from other consumers
-
-
-
-
- Initializes a new instance.
-
- The policy.
-
-
-
- Initializes a new instance.
-
- The on break.
- The on reset.
- The on half open.
- The on retry.
-
-
-
- Gets or sets the batch reading policy.
-
-
-
-
- Performs an implicit conversion.
-
- The policy.
-
- The result of the conversion.
-
-
-
-
- Performs an implicit conversion.
-
- The instance.
-
- The result of the conversion.
-
-
-
-
- Define when to claim stale (long waiting) messages from other consumers
-
-
-
-
- Empty batch count define number of empty fetching cycle in a row
- which will trigger operation of trying to get stale messages from other consumers.
-
-
-
-
- The minimum message idle time to allow the reassignment of the message(s).
-
-
-
-
- Responsible to save information to REDIS hash storage.
- The information can be either Segmentation or Interception.
- When adding it via the builder it can be arrange in a chain in order of having
- 'Chain of Responsibility' for saving different parts into different storage (For example GDPR's PII).
- Alternative, chain can serve as a cache layer.
-
-
-
-
- Initializes a new instance.
-
- The database task.
-
-
-
- Load the bucket information.
-
- The meta fetch provider.
- The current bucket (previous item in the chain).
- The type of the storage.
- The get property.
- The cancellation.
-
- Either Segments or Interceptions.
-
-
-
-
-
- Uses REDIS consumer channel.
-
- The builder.
- The setting.
- The redis configuration.
- The endpoint env key.
- The password env key.
-
-
-
-
- Uses REDIS consumer channel.
-
- The builder.
- The setting.
- The endpoint env key.
- The password env key.
-
-
-
-
- Uses REDIS consumer channel.
-
- The builder.
- The redis client.
- The setting.
-
-
-
-
- Uses REDIS consumer channel.
-
- The builder.
- The redis client.
- The setting.
-
-
-
-
- Uses REDIS consumer channel.
-
- The builder.
- The service provider.
- The setting.
-
- redisClient
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/icon.png b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/icon.png
deleted file mode 100644
index d6811ad8..00000000
Binary files a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisConsumerProvider/icon.png and /dev/null differ
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs
deleted file mode 100644
index 7cfe0213..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProducerChannel.cs
+++ /dev/null
@@ -1,193 +0,0 @@
-using System.Collections.Immutable;
-using System.Diagnostics;
-using System.Text.Json;
-
-using Microsoft.Extensions.Logging;
-
-using OpenTelemetry;
-
-using Polly;
-
-using StackExchange.Redis;
-
-using static Weknow.EventSource.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
-
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider
-{
- internal class RedisProducerChannel : IProducerChannelProvider
- {
- private static readonly ActivitySource ACTIVITY_SOURCE = new ActivitySource(EventSourceConstants.REDIS_PRODUCER_CHANNEL_SOURCE);
- private readonly ILogger _logger;
- private readonly AsyncPolicy _resiliencePolicy;
- private readonly IEventSourceRedisConnectionFacroty _connFactory;
- private readonly IProducerStorageStrategy _defaultStorageStrategy;
- private const string META_SLOT = "____";
-
- #region Ctor
-
- /// <summary>
- /// Initializes a new instance.
- /// </summary>
- /// <param name="redisFactory">The redis database promise.</param>
- /// <param name="logger">The logger.</param>
- /// <param name="resiliencePolicy">The resilience policy for retry.</param>
- public RedisProducerChannel(
- IEventSourceRedisConnectionFacroty redisFactory,
- ILogger logger,
- AsyncPolicy? resiliencePolicy)
- {
- _connFactory = redisFactory;
- _logger = logger;
- _resiliencePolicy = resiliencePolicy ??
- Policy.Handle<Exception>()
- .RetryAsync(3);
- _defaultStorageStrategy = new RedisHashStorageStrategy(_connFactory, logger);
- }
-
-
- #endregion // Ctor
-
- #region GetDB
-
- /// <summary>
- /// Gets the database.
- /// </summary>
- /// <param name="redisConnFactory">The redis connection factory.</param>
- /// <returns></returns>
- private static async Task<IDatabaseAsync> GetDB(IEventSourceRedisConnectionFacroty redisConnFactory)
- {
- var mp = await redisConnFactory.GetAsync();
- return mp.GetDatabase();
- }
-
- #endregion // GetDB
-
- #region SendAsync
-
- /// <summary>
- /// Sends raw announcement.
- /// </summary>
- /// <param name="payload">The raw announcement data.</param>
- /// <param name="storageStrategy">The storage strategy.</param>
- /// <returns>
- /// Return the message id
- /// </returns>
- public async ValueTask<string> SendAsync(
- Announcement payload,
- ImmutableArray<IProducerStorageStrategy> storageStrategy)
- {
- Metadata meta = payload.Metadata;
- string id = meta.MessageId;
-
- #region var entries = new NameValueEntry[]{...}
-
- string metaJson = JsonSerializer.Serialize(meta, EventSourceOptions.FullSerializerOptions);
-
- // local method
- NameValueEntry KV(RedisValue key, RedisValue value) => new NameValueEntry(key, value);
- ImmutableArray<NameValueEntry> commonEntries = ImmutableArray.Create(
- KV(nameof(meta.MessageId), id),
- KV(nameof(meta.Operation), meta.Operation),
- KV(nameof(meta.ProducedAt), meta.ProducedAt.ToUnixTimeSeconds()),
- KV(nameof(meta.ChannelType), CHANNEL_TYPE),
- KV(nameof(meta.Origin), meta.Origin.ToString()),
- KV(META_SLOT, metaJson)
- );
-
- #endregion // var entries = new NameValueEntry[]{...}
-
- RedisValue messageId = await _resiliencePolicy.ExecuteAsync(LocalStreamAddAsync);
-
- return (string?)messageId ?? "0000000000000-0";
-
- #region LocalStreamAddAsync
-
- async Task<RedisValue> LocalStreamAddAsync()
- {
- await LocalStoreBucketAsync(EventBucketCategories.Segments);
- await LocalStoreBucketAsync(EventBucketCategories.Interceptions);
-
- var telemetryBuilder = commonEntries.ToBuilder();
- var activityName = $"{meta.Operation} produce";
- using Activity? activity = ACTIVITY_SOURCE.StartActivity(activityName, ActivityKind.Producer);
- activity.InjectSpan(meta, telemetryBuilder, LocalInjectTelemetry);
- meta.InjectTelemetryTags(activity);
- var entries = telemetryBuilder.ToArray();
-
- try
- {
- IConnectionMultiplexer conn = await _connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- using var scope = SuppressInstrumentationScope.Begin();
- var k = meta.Key();
- var result = await db.StreamAddAsync(k, entries,
- flags: CommandFlags.DemandMaster);
- return result;
- }
- #region Exception Handling
-
- catch (RedisConnectionException ex)
- {
- _logger.LogError(ex, "REDIS Connection Failure: push event [{id}] into the [{partition}->{shard}] stream: {operation}",
- meta.MessageId, meta.Partition, meta.Shard, meta.Operation);
- throw;
- }
- catch (Exception ex)
- {
- _logger.LogError(ex, "Fail to push event [{id}] into the [{partition}->{shard}] stream: {operation}",
- meta.MessageId, meta.Partition, meta.Shard, meta.Operation);
- throw;
- }
-
- #endregion // Exception Handling
-
- #region ValueTask StoreBucketAsync(StorageType storageType) // local function
-
- async ValueTask LocalStoreBucketAsync(EventBucketCategories storageType)
- {
- var strategies = storageStrategy.Where(m => m.IsOfTargetType(storageType));
- Bucket bucket = storageType == EventBucketCategories.Segments ? payload.Segments : payload.InterceptorsData;
- if (strategies.Any())
- {
- foreach (var strategy in strategies)
- {
- await SaveBucketAsync(strategy);
- }
- }
- else
- {
- await SaveBucketAsync(_defaultStorageStrategy);
- }
-
- async ValueTask SaveBucketAsync(IProducerStorageStrategy strategy)
- {
- IImmutableDictionary<string, string> metaItems =
- await strategy.SaveBucketAsync(id, bucket, storageType, meta);
- foreach (var item in metaItems)
- {
- commonEntries = commonEntries.Add(KV(item.Key, item.Value));
- }
-
- }
- }
-
- #endregion // ValueTask StoreBucketAsync(StorageType storageType) // local function
- #region LocalInjectTelemetry
-
- void LocalInjectTelemetry(
- ImmutableArray<NameValueEntry>.Builder builder,
- string key,
- string value)
- {
- builder.Add(KV(key, value));
- }
-
- #endregion // LocalInjectTelemetry
- }
-
- #endregion // LocalStreamAddAsync
- }
-
- #endregion // SendAsync
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProviderExtensions.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProviderExtensions.cs
deleted file mode 100644
index fb5f58e9..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/RedisProviderExtensions.cs
+++ /dev/null
@@ -1,100 +0,0 @@
-using Microsoft.Extensions.Configuration;
-using Microsoft.Extensions.DependencyInjection;
-using Microsoft.Extensions.Logging;
-
-using OpenTelemetry.Trace;
-
-using Polly;
-
-using StackExchange.Redis;
-
-using Weknow.EventSource.Backbone.Channels.RedisProvider;
-using Weknow.EventSource.Backbone.Private;
-
-namespace Weknow.EventSource.Backbone
-{
- public static class RedisProviderExtensions
- {
- /// <summary>
- /// Adds the event producer telemetry source (will result in tracing the producer).
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <returns></returns>
- public static TracerProviderBuilder AddEventProducerTelemetry(this TracerProviderBuilder builder) => builder.AddSource(nameof(RedisProducerChannel));
-
- /// <summary>
- /// Uses REDIS producer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="configuration">The configuration.</param>
- /// <param name="resiliencePolicy">The resilience policy.</param>
- /// <param name="credentialsKeys">Environment keys of the credentials</param>
- /// <returns></returns>
- public static IProducerStoreStrategyBuilder UseRedisChannel(
- this IProducerBuilder builder,
- Action<ConfigurationOptions>? configuration = null,
- AsyncPolicy? resiliencePolicy = null,
- RedisCredentialsKeys credentialsKeys = default)
- {
- var result = builder.UseChannel(LocalCreate);
- return result;
-
- IProducerChannelProvider LocalCreate(ILogger logger)
- {
- var connFactory = new EventSourceRedisConnectionFacroty(logger, configuration, credentialsKeys);
- var channel = new RedisProducerChannel(
- connFactory,
- logger ?? EventSourceFallbakLogger.Default,
- resiliencePolicy);
- return channel;
- }
- }
-
- /// <summary>
- /// Uses REDIS producer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="redisConnectionFactory">The redis database.</param>
- /// <param name="resiliencePolicy">The resilience policy.</param>
- /// <returns></returns>
- public static IProducerStoreStrategyBuilder UseRedisChannel(
- this IProducerBuilder builder,
- IEventSourceRedisConnectionFacroty redisConnectionFactory,
- AsyncPolicy? resiliencePolicy = null)
- {
- var result = builder.UseChannel(LocalCreate);
- return result;
-
- IProducerChannelProvider LocalCreate(ILogger logger)
- {
- var channel = new RedisProducerChannel(
- redisConnectionFactory,
- logger ?? EventSourceFallbakLogger.Default,
- resiliencePolicy);
- return channel;
- }
- }
-
- /// <summary>
- /// Uses REDIS producer channel.
- /// </summary>
- /// <param name="builder">The builder.</param>
- /// <param name="serviceProvider">The service provider.</param>
- /// <param name="resiliencePolicy">The resilience policy.</param>
- /// <returns></returns>
- public static IProducerStoreStrategyBuilder UseRedisChannelInjection(
- this IProducerBuilder builder,
- IServiceProvider serviceProvider,
- AsyncPolicy? resiliencePolicy = null)
- {
- ILogger logger = serviceProvider.GetService<ILogger<RedisProducerChannel>>() ?? throw new ArgumentNullException();
-
- var connFactory = serviceProvider.GetService<IEventSourceRedisConnectionFacroty>();
- if (connFactory == null)
- throw new RedisConnectionException(ConnectionFailureType.None, $"{nameof(IEventSourceRedisConnectionFacroty)} is not registerd, use services.{nameof(RedisDiExtensions.AddEventSourceRedisConnection)} in order to register it at Setup stage.");
- return builder.UseRedisChannel(connFactory, resiliencePolicy);
- }
- }
-}
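// Illustrative usage (not part of the deleted file above): the producer-side counterpart of the
// consumer wiring, built on the extensions above. 'producerBuilder' and 'serviceProvider' are
// placeholders from the host application; the Polly policy is just an example resiliencePolicy.
AsyncPolicy retry = Policy.Handle<RedisException>()
                          .WaitAndRetryAsync(3, attempt => TimeSpan.FromMilliseconds(200 * attempt));
IProducerStoreStrategyBuilder producerChannel =
    producerBuilder.UseRedisChannelInjection(serviceProvider, retry);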
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.csproj b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.csproj
deleted file mode 100644
index ea775aff..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.csproj
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.xml b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.xml
deleted file mode 100644
index df46b984..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/Weknow.EventSource.Backbone.Channels.RedisProducerProvider.xml
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.RedisProducerProvider
-
-
-
-
- Initializes a new instance.
-
- The redis database promise.
- The logger.
- The resilience policy for retry.
-
-
-
- Initializes a new instance.
-
- The redis database promise.
- The logger.
- The resilience policy for retry.
-
-
-
- Initializes a new instance.
-
- The redis database.
- The logger.
- The resilience policy for retry.
-
-
-
- Initializes a new instance.
-
- The logger.
- The configuration.
- The resilience policy for retry.
- The endpoint env key.
- The password env key.
-
-
-
- Gets the database.
-
- The redis.
-
-
-
-
- Sends raw announcement.
-
- The raw announcement data.
- The storage strategy.
-
- Return the message id
-
-
-
-
- Responsible to save information to REDIS hash storage.
- The information can be either Segmentation or Interception.
- When adding it via the builder it can be arrange in a chain in order of having
- 'Chain of Responsibility' for saving different parts into different storage (For example GDPR's PII).
- Alternative, chain can serve as a cache layer.
-
-
-
-
- Initializes a new instance.
-
- The database task.
- The logger.
-
-
-
- Saves the bucket information.
-
- The identifier.
- Either Segments or Interceptions.
- The type.
- The metadata.
- The cancellation.
-
- Array of metadata entries which can be used by the consumer side storage strategy, in order to fetch the data.
-
-
-
-
- Adds the event producer telemetry source (will result in tracing the producer).
-
- The builder.
-
-
-
-
- Uses REDIS producer channel.
-
- The builder.
- The configuration.
- The resilience policy.
- The endpoint env key.
- The password env key.
-
-
-
-
-
- Uses REDIS producer channel.
-
- The builder.
- The redis database.
- The resilience policy.
-
-
-
-
-
- Uses REDIS producer channel.
-
- The builder.
- The redis database.
- The resilience policy.
-
-
-
-
-
- Uses REDIS producer channel.
-
- The builder.
- The service provider.
- The resilience policy.
-
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/icon.png b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/icon.png
deleted file mode 100644
index d6811ad8..00000000
Binary files a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProducerProvider/icon.png and /dev/null differ
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFacroty.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFacroty.cs
deleted file mode 100644
index 9eade314..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/EventSourceRedisConnectionFacroty.cs
+++ /dev/null
@@ -1,72 +0,0 @@
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Event Source connection (for IoC)
- /// Because IConnectionMultiplexer may be used by other components,
- /// it's clearer to wrap the IConnectionMultiplexer for easier resolution by IoC.
- /// This factory is also responsible for the connection health.
- /// It will return the same connection as long as it is healthy.
- ///
- public sealed class EventSourceRedisConnectionFacroty : RedisConnectionFacrotyBase, IEventSourceRedisConnectionFacroty
- {
- #region Ctor
-
- #region Overloads
-
- /// <summary>
- /// Constructor
- /// </summary>
- /// <param name="logger"></param>
- /// <param name="configuration"></param>
- /// <param name="credentialsKeys">Environment keys of the credentials</param>
- public EventSourceRedisConnectionFacroty(
- ILogger<EventSourceRedisConnectionFacroty> logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credentialsKeys = default
- ) : this((ILogger)logger, configuration, credentialsKeys)
- {
- }
-
- #endregion // Overloads
-
- /// <summary>
- /// Constructor
- /// </summary>
- /// <param name="logger"></param>
- /// <param name="configuration"></param>
- /// <param name="credentialsKeys">Environment keys of the credentials</param>
- public EventSourceRedisConnectionFacroty(
- ILogger logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credentialsKeys = default) : base(logger, configuration, credentialsKeys)
- {
- //CredentialsKeys = credentialsKeys;
- }
-
-
- #endregion // Ctor
-
- #region Kind
-
- ///
- /// Gets the kind.
- ///
- protected override string Kind => "Event-Sourcing";
-
- #endregion // Kind
-
- //#region CredentialsKeys
-
- /////
- ///// Gets the credentials keys.
- /////
- //protected override RedisCredentialsKeys CredentialsKeys { get; }
-
- //#endregion // CredentialsKeys
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFacroty.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFacroty.cs
deleted file mode 100644
index 8db35333..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IEventSourceRedisConnectionFacroty.cs
+++ /dev/null
@@ -1,9 +0,0 @@
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Connection factory
- ///
- public interface IEventSourceRedisConnectionFacroty : IRedisConnectionFacrotyBase
- {
- }
-}
\ No newline at end of file
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IRedisConnectionFacrotyBase.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IRedisConnectionFacrotyBase.cs
deleted file mode 100644
index 467da1e7..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/IRedisConnectionFacrotyBase.cs
+++ /dev/null
@@ -1,19 +0,0 @@
-using StackExchange.Redis;
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Connection factory
- ///
- public interface IRedisConnectionFacrotyBase
- {
- /// <summary>
- /// Get a valid connection
- /// </summary>
- Task<IConnectionMultiplexer> GetAsync();
- /// <summary>
- /// Get database
- /// </summary>
- Task<IDatabaseAsync> GetDatabaseAsync();
- }
-}
\ No newline at end of file
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs
deleted file mode 100644
index 659f6216..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisClientFactory.cs
+++ /dev/null
@@ -1,191 +0,0 @@
-using System.Reflection;
-using System.Text;
-
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-using static Weknow.EventSource.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// REDIS client factory
- ///
- public static class RedisClientFactory
- {
- private static int _index = 0;
- private const string CONNECTION_NAME_PATTERN = "ev-src:{0}:{1}:{2}";
- private static readonly string? ASSEMBLY_NAME = Assembly.GetEntryAssembly()?.GetName()?.Name?.ToDash();
- private static readonly Version? ASSEMBLY_VERSION = Assembly.GetEntryAssembly()?.GetName()?.Version;
-
- ///
- /// Blocking Create REDIS client.
- /// Exists only for code which doesn't support async (like ASP.NET setup (AddSingleton))
- ///
- /// The configuration.
- /// The credential.
- ///
- /// Fail to establish REDIS connection
- /// Fail to establish REDIS connection
- public static IConnectionMultiplexer CreateProviderBlocking(
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credential = default)
- {
- var task = CreateProviderAsync(null, configuration, credential);
- return task.Result;
- }
-
- ///
- /// Blocking Create REDIS client.
- /// Exists only for code which doesn't support async (like ASP.NET setup (AddSingleton))
- ///
- /// The logger.
- /// The configuration.
- /// The credential.
- ///
- /// Fail to establish REDIS connection
- /// Fail to establish REDIS connection
- public static IConnectionMultiplexer CreateProviderBlocking(
- ILogger logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credential = default)
- {
- var task = CreateProviderAsync(logger, configuration, credential);
- return task.Result;
- }
-
- ///
- /// Create REDIS client.
- ///
- /// The configuration.
- /// The credential.
- ///
- /// Fail to establish REDIS connection
- /// Fail to establish REDIS connection
- public static Task<IConnectionMultiplexer> CreateProviderAsync(
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credential = default)
- {
- return CreateProviderAsync(null, configuration, credential);
- }
-
-
- ///
- /// Create REDIS client.
- ///
- /// The logger.
- /// The configuration.
- /// The credential's environment keys.
- ///
- ///
- ///
- /// Fail to establish REDIS connection
- /// Fail to establish REDIS connection
- public static async Task<IConnectionMultiplexer> CreateProviderAsync(
- ILogger? logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credential = default)
- {
- string endpointKey = credential.EndpointKey ?? END_POINT_KEY;
- string passwordKey = credential.PasswordKey ?? PASSWORD_KEY;
- string? endpoint = Environment.GetEnvironmentVariable(endpointKey);
-
- try
- {
- if (endpoint == null)
- {
- #region Throw + Log
-
- if (logger != null)
- logger.LogError("REDIS CONNECTION: ENDPOINT [ENV variable: {endpointKey}] is missing", endpointKey);
- else
- Console.WriteLine($"REDIS CONNECTION: ENDPOINT [ENV variable: {endpointKey}] is missing");
- throw new KeyNotFoundException($"REDIS KEY [ENV variable: {endpointKey}] is missing");
-
- #endregion // Throw + Log
- }
-
- string? password = Environment.GetEnvironmentVariable(passwordKey);
-
- var sb = new StringBuilder();
- var writer = new StringWriter(sb);
-
- // https://stackexchange.github.io/StackExchange.Redis/Configuration.html
- var redisConfiguration = ConfigurationOptions.Parse(endpoint);
- redisConfiguration.ClientName = string.Format(
- CONNECTION_NAME_PATTERN,
- ASSEMBLY_NAME,
- ASSEMBLY_VERSION,
- Interlocked.Increment(ref _index));
-
- configuration?.Invoke(redisConfiguration);
- redisConfiguration.Password = password;
- // keep retry to get connection on failure
- redisConfiguration.AbortOnConnectFail = false;
- //redisConfiguration.ConnectTimeout = 15;
- //redisConfiguration.SyncTimeout = 10;
- //redisConfiguration.AsyncTimeout = 10;
- //redisConfiguration.DefaultDatabase = Debugger.IsAttached ? 1 : null;
-
-
- IConnectionMultiplexer redis = await ConnectionMultiplexer.ConnectAsync(redisConfiguration, writer);
- if (logger != null)
- logger.LogInformation("REDIS Connection [{envKey}]: {info} succeed", endpointKey, sb);
- else
- Console.WriteLine($"REDIS Connection [{endpointKey}] succeed: {sb}");
- redis.ConnectionFailed += OnConnectionFailed;
- redis.ErrorMessage += OnConnErrorMessage;
- redis.InternalError += OnInternalConnError;
-
- return redis;
- }
- catch (Exception ex)
- {
- if (logger != null)
- logger.LogError(ex.FormatLazy(), "REDIS CONNECTION ERROR");
- else
- Console.WriteLine($"REDIS CONNECTION ERROR: {ex.FormatLazy()}");
- throw;
- }
-
- #region Event Handlers
-
- void OnInternalConnError(object? sender, InternalErrorEventArgs e)
- {
- if (logger != null)
- {
- logger.LogError(e.Exception, "REDIS Connection internal failure: Failure type = {typeOfConnection}, Origin = {typeOfFailure}",
- e.ConnectionType, e.Origin);
- }
- else
- Console.WriteLine($"REDIS Connection internal failure: Failure type = {e.ConnectionType}, Origin = {e.Origin}");
- }
-
- void OnConnErrorMessage(object? sender, RedisErrorEventArgs e)
- {
- if (logger != null)
- {
- logger.LogWarning("REDIS Connection error: {message}",
- e.Message);
- }
- else
- Console.WriteLine($"REDIS Connection error: {e.Message}");
- }
-
-
- void OnConnectionFailed(object? sender, ConnectionFailedEventArgs e)
- {
- if (logger != null)
- {
- logger.LogError(e.Exception, "REDIS Connection failure: Failure type = {typeOfConnection}, Failure type = {typeOfFailure}", e.ConnectionType, e.FailureType);
- }
- else
- Console.WriteLine($"REDIS Connection failure: Failure type = {e.ConnectionType}, Failure type = {e.FailureType}");
- }
-
- #endregion // Event Handlers
-
- }
- }
-}
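// Illustrative usage (not part of the deleted file above): creating a raw connection with the factory
// above. Endpoint and password are resolved from the environment variables selected by
// RedisCredentialsKeys (REDIS_EVENT_SOURCE_ENDPOINT / REDIS_EVENT_SOURCE_PASS by default); the
// ConnectTimeout tweak and the 'logger' instance are examples from the caller's side.
IConnectionMultiplexer mp = await RedisClientFactory.CreateProviderAsync(
    logger,                                  // any ILogger; pass null to fall back to Console output
    cfg => cfg.ConnectTimeout = 10_000);     // optional ConfigurationOptions customization
IDatabase db = mp.GetDatabase();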
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisConnectionFacrotyBase.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisConnectionFacrotyBase.cs
deleted file mode 100644
index f2361057..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisConnectionFacrotyBase.cs
+++ /dev/null
@@ -1,200 +0,0 @@
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Event Source connection (for IoC)
- /// Because IConnectionMultiplexer may be used by other components,
- /// it's clearer to wrap the IConnectionMultiplexer for easier resolution by IoC.
- /// This factory is also responsible for the connection health.
- /// It will return the same connection as long as it is healthy.
- ///
- public abstract class RedisConnectionFacrotyBase : IRedisConnectionFacrotyBase, IDisposable, IAsyncDisposable
- {
- private const int CLOSE_DELEY_MILLISECONDS = 5000;
- private Task<IConnectionMultiplexer> _redisTask;
- private readonly ILogger _logger;
- private readonly Action<ConfigurationOptions>? _configuration;
- private readonly RedisCredentialsKeys _credentialsKeys;
- private readonly AsyncLock _lock = new AsyncLock(TimeSpan.FromSeconds(CLOSE_DELEY_MILLISECONDS));
- private DateTime _lastResetConnection = DateTime.Now;
- private int _reconnectTry = 0;
-
- #region Ctor
-
- #region Overloads
-
- ///
- /// Constructor
- ///
- /// The logger.
- /// The configuration.
- /// The credentials keys.
- public RedisConnectionFacrotyBase(
- ILogger logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credentialsKeys = default
- ) : this((ILogger)logger, configuration, credentialsKeys)
- {
- }
-
- #endregion // Overloads
-
- ///
- /// Constructor
- ///
- /// The logger.
- /// The configuration.
- /// The credentials keys.
- public RedisConnectionFacrotyBase(
- ILogger logger,
- Action<ConfigurationOptions>? configuration = null,
- RedisCredentialsKeys credentialsKeys = default)
- {
- _logger = logger;
- _configuration = configuration;
- _credentialsKeys = credentialsKeys;
- _redisTask = RedisClientFactory.CreateProviderAsync(logger, configuration, credentialsKeys);
- }
-
-
- #endregion // Ctor
-
- //#region CredentialsKeys
-
- /////
- ///// Gets the credentials keys.
- /////
- //protected abstract RedisCredentialsKeys CredentialsKeys { get; }
-
- //#endregion // CredentialsKeys
-
- #region Kind
-
- ///
- /// Gets the kind.
- ///
- protected abstract string Kind { get; }
-
- #endregion // Kind
-
- #region GetAsync
-
- ///
- /// Get a valid connection
- ///
- async Task<IConnectionMultiplexer> IRedisConnectionFacrotyBase.GetAsync()
- {
- var conn = await _redisTask;
- if (conn.IsConnected)
- return conn;
- string status = conn.GetStatus();
- _logger.LogWarning("REDIS Connection [{kind}] [{ClientName}]: status = [{status}]",
- Kind,
- conn.ClientName, status);
- var disp = await _lock.AcquireAsync();
- using (disp)
- {
- conn = await _redisTask;
- if (conn.IsConnected)
- return conn;
- int tryNumber = Interlocked.Increment(ref _reconnectTry);
- _logger.LogWarning("[{kind}] Reconnecting to REDIS: try=[{tryNumber}], client name=[{clientName}]",
- Kind, tryNumber, conn.ClientName);
- var duration = DateTime.Now - _lastResetConnection;
- if (duration > TimeSpan.FromSeconds(5))
- {
- _lastResetConnection = DateTime.Now;
- var cn = conn;
- Task _ = Task.Delay(CLOSE_DELEY_MILLISECONDS).ContinueWith(_ => cn.CloseAsync());
- _redisTask = RedisClientFactory.CreateProviderAsync(_logger, _configuration, _credentialsKeys);
- var newConn = await _redisTask;
- return newConn;
- }
- return conn;
- }
- }
-
- #endregion // GetAsync
-
- #region GetDatabaseAsync
-
- ///
- /// Get database
- ///
- async Task<IDatabaseAsync> IRedisConnectionFacrotyBase.GetDatabaseAsync()
- {
- IRedisConnectionFacrotyBase self = this;
- IConnectionMultiplexer conn = await self.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- return db;
- }
-
- #endregion // GetDatabaseAsync
-
- #region Dispose (pattern)
-
- ///
- /// Disposed indication
- ///
- public bool Disposed { get; private set; }
-
- ///
- /// Dispose
- ///
- ///
- private void Dispose(bool disposing)
- {
- _logger.LogWarning("REDIS [{kind}]: Disposing connection", Kind);
- if (!Disposed)
- {
- var conn = _redisTask.Result;
- conn.Dispose();
- Disposed = true;
- OnDispose(disposing);
- }
- }
-
- ///
- /// Dispose
- ///
- public void Dispose()
- {
- // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
- Dispose(disposing: true);
- GC.SuppressFinalize(this);
- }
-
- ///
- /// Called when [dispose].
- ///
- /// if set to true [disposing].
- ///
- public virtual void OnDispose(bool disposing) { }
-
- ///
- /// Dispose
- ///
- ///
- public async ValueTask DisposeAsync()
- {
- _logger.LogWarning("REDIS [{kind}]: Disposing connection (async)", Kind);
- var redis = await _redisTask;
- redis.Dispose();
- }
-
- ///
- /// Finalizer
- ///
- ~RedisConnectionFacrotyBase()
- {
- // Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
- Dispose(disposing: false);
- }
-
- #endregion // Dispose (pattern)
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsKeys.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsKeys.cs
deleted file mode 100644
index 50c4ef97..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Factory/RedisCredentialsKeys.cs
+++ /dev/null
@@ -1,26 +0,0 @@
-using static Weknow.EventSource.Backbone.Channels.RedisProvider.Common.RedisChannelConstants;
-
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Environment keys for REDIS's credentials
- ///
- public readonly record struct RedisCredentialsKeys
- {
- public RedisCredentialsKeys()
- {
- EndpointKey = END_POINT_KEY;
- PasswordKey = PASSWORD_KEY;
- }
-
- ///
- /// Endpoint Key
- ///
- public string EndpointKey { get; init; }
- ///
- /// Password Key
- ///
- public string PasswordKey { get; init; }
- }
-}
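// Illustrative usage (not part of the deleted file above): pointing the connection factories at a
// non-default pair of environment variables. Only the *names* of the variables are configured here;
// the endpoint and password values themselves are still read from the process environment. The
// variable names below are examples.
var credKeys = new RedisCredentialsKeys { EndpointKey = "MY_REDIS_ENDPOINT", PasswordKey = "MY_REDIS_PASS" };
// With the defaults, e.g.: REDIS_EVENT_SOURCE_ENDPOINT=localhost:6379 (and no password variable set)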
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs
deleted file mode 100644
index 5a4c107d..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisChannelConstants.cs
+++ /dev/null
@@ -1,25 +0,0 @@
-namespace Weknow.EventSource.Backbone.Channels.RedisProvider.Common
-{
- public static class RedisChannelConstants
- {
- public const string CHANNEL_TYPE = "REDIS Channel V1";
- public const string META_ARRAY_SEPARATOR = "~|~";
-
- public const string END_POINT_KEY = "REDIS_EVENT_SOURCE_ENDPOINT";
- public const string PASSWORD_KEY = "REDIS_EVENT_SOURCE_PASS";
-
- ///
- /// a work around used to release messages back to the stream (consumer)
- ///
- public const string NONE_CONSUMER = "__NONE_CUNSUMER__";
-
- public static class MetaKeys
- {
- public const string SegmentsKeys = "segments-keys";
- public const string InterceptorsKeys = "interceptors-keys";
- public const string TelemetryBaggage = "telemetry-baggage";
- public const string TelemetrySpanId = "telemetry-span-id";
- public const string TelemetryTraceId = "telemetry-trace-id";
- }
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs
deleted file mode 100644
index a0ae8590..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisCommonProviderExtensions.cs
+++ /dev/null
@@ -1,154 +0,0 @@
-using Microsoft.Extensions.Logging;
-
-using StackExchange.Redis;
-
-namespace Weknow.EventSource.Backbone.Private
-{
- ///
- /// Redis common provider extensions
- ///
- public static class RedisCommonProviderExtensions
- {
- private const int MAX_DELAY = 15_000;
- private const int KEY_NOT_EXISTS_DELAY = 3_000;
-
- private static readonly AsyncLock _lock = new AsyncLock(TimeSpan.FromSeconds(20));
-
- #region CreateConsumerGroupIfNotExistsAsync
-
- ///
- /// Creates the consumer group if not exists asynchronous.
- ///
- /// The connection factory.
- /// The event source key.
- /// The consumer group.
- /// The logger.
- ///
- public static async Task CreateConsumerGroupIfNotExistsAsync(
- this IEventSourceRedisConnectionFacroty connFactory,
- string eventSourceKey,
- string consumerGroup,
- ILogger logger)
- {
-            StreamGroupInfo[] groupsInfo = Array.Empty<StreamGroupInfo>();
-
- int delay = 0;
- bool exists = false;
- int tryNumber = 0;
- while (groupsInfo.Length == 0)
- {
- tryNumber++;
-
- IConnectionMultiplexer conn = await connFactory.GetAsync();
- IDatabaseAsync db = conn.GetDatabase();
- try
- {
- #region Validation (if key exists)
-
- if (!await db.KeyExistsAsync(eventSourceKey,
- flags: CommandFlags.DemandMaster))
- {
- await Task.Delay(KEY_NOT_EXISTS_DELAY);
- logger.LogDebug("Key not exists (yet): {info}", CurrentInfo());
- continue;
- }
-
- #endregion // Validation (if key exists)
-
- #region delay on retry
-
- if (delay == 0)
- delay = 4;
- else
- {
- delay = Math.Min(delay * 2, MAX_DELAY);
- await Task.Delay(delay);
- if (tryNumber % 10 == 0)
- {
- logger.LogWarning("Create Consumer Group If Not Exists: still waiting {info}", CurrentInfo());
- }
- }
-
-
- #endregion // delay on retry
-
- using var lk = await _lock.AcquireAsync();
- groupsInfo = await db.StreamGroupInfoAsync(
- eventSourceKey,
- flags: CommandFlags.DemandMaster);
- exists = groupsInfo.Any(m => m.Name == consumerGroup);
- }
- #region Exception Handling
-
- catch (RedisServerException ex)
- {
- if (await db.KeyExistsAsync(eventSourceKey,
- flags: CommandFlags.DemandMaster))
- {
- logger.LogWarning(ex, "Create Consumer Group If Not Exists: failed. {info}", CurrentInfo());
- }
- else
- {
- await Task.Delay(KEY_NOT_EXISTS_DELAY);
- logger.LogDebug(ex, "Create Consumer Group If Not Exists: failed. {info}", CurrentInfo());
- }
- }
- catch (RedisConnectionException ex)
- {
- logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: connection failure. {info}", CurrentInfo());
- }
- catch (RedisTimeoutException ex)
- {
- logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: timeout failure. {info}", CurrentInfo());
- }
- catch (Exception ex)
- {
- logger.LogWarning(ex.FormatLazy(), "Create Consumer Group If Not Exists: unexpected failure. {info}", CurrentInfo());
- }
-
- #endregion // Exception Handling
- if (!exists)
- {
- try
- {
- using var lk = await _lock.AcquireAsync();
- await db.StreamCreateConsumerGroupAsync(eventSourceKey,
- consumerGroup,
- StreamPosition.Beginning,
- flags: CommandFlags.DemandMaster);
- }
- #region Exception Handling
-
- catch (RedisServerException ex)
- {
- logger.LogWarning(ex.FormatLazy(), $"{nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync: failed & still waiting {CurrentInfo()}");
- }
- catch (RedisConnectionException ex)
- {
- logger.LogWarning(ex.FormatLazy(), $"{nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync: connection failure {CurrentInfo()}");
- }
- catch (Exception ex)
- {
- logger.LogWarning(ex.FormatLazy(), $"{nameof(CreateConsumerGroupIfNotExistsAsync)}.StreamCreateConsumerGroupAsync: unexpected failure {CurrentInfo()}");
- }
-
- #endregion // Exception Handling
- }
-
- #region string CurrentInfo()
-
- string CurrentInfo() => @$"
-Try number: {tryNumber}
-Stream key: {eventSourceKey}
-Consumer Group: {consumerGroup}
-Is Connected: {db.Multiplexer.IsConnected}
-Configuration: {db.Multiplexer.Configuration}
-";
-
- #endregion // string CurrentInfo()
- }
- }
-
- #endregion // CreateConsumerGroupIfNotExistsAsync
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs
deleted file mode 100644
index f47b8b94..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisDiExtensions.cs
+++ /dev/null
@@ -1,24 +0,0 @@
-using Microsoft.Extensions.DependencyInjection;
-
-using Weknow.EventSource.Backbone;
-
-namespace Microsoft.Extensions.Configuration
-{
- ///
- /// The redis DI extensions.
- ///
- public static class RedisDiExtensions
- {
- ///
- /// Adds the event source redis connection to the DI.
- ///
- /// The services.
- /// An IServiceCollection.
- public static IServiceCollection AddEventSourceRedisConnection(
- this IServiceCollection services)
- {
- services.AddSingleton();
- return services;
- }
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisTelemetryrExtensions.cs b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisTelemetryrExtensions.cs
deleted file mode 100644
index 221ef41a..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/RedisTelemetryrExtensions.cs
+++ /dev/null
@@ -1,31 +0,0 @@
-using System.Diagnostics;
-
-namespace Weknow.EventSource.Backbone
-{
- public static class RedisTelemetryrExtensions
- {
- #region InjectTelemetryTags
-
- ///
- /// Adds standard open-telemetry tags (for redis).
- ///
- /// The meta.
- /// The activity.
- public static void InjectTelemetryTags(this Metadata meta, Activity? activity)
- {
- // These tags are added demonstrating the semantic conventions of the OpenTelemetry messaging specification
- // See:
- // * https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md#messaging-attributes
- activity?.SetTag("messaging.system", "redis-stream");
- activity?.SetTag("messaging.destination_kind", "topic");
-
- activity?.SetTag("messaging.destination", meta.Operation);
- activity?.SetTag("messaging.message_id", meta.MessageId);
- activity?.SetTag("messaging.redis.key", $"{meta.Partition}:{meta.Shard}");
-
- meta.InjectMetaTelemetryTags(activity);
- }
-
- #endregion // InjectTelemetryTags
- }
-}
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.csproj b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.csproj
deleted file mode 100644
index 571df549..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.csproj
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.xml b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.xml
deleted file mode 100644
index f50410df..00000000
--- a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/Weknow.EventSource.Backbone.Channels.RedisProvider.Common.xml
+++ /dev/null
@@ -1,116 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.RedisProvider.Common
-
-
-
-
- REDIS client factory
-
-
-
-
- Blocking Create REDIS client.
- Exist only for code which don't support async (like ASP.NET setup (AddSingleton))
-
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Blocking Create REDIS client.
- Exist only for code which don't support async (like ASP.NET setup (AddSingleton))
-
- The logger.
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Create REDIS client.
-
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Create REDIS client.
-
- The logger.
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Create REDIS client.
-
- The logger.
- The number of retries.
- The configuration.
- The endpoint key.
- The password key.
-
-
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Create REDIS database client.
-
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Create REDIS database client.
-
- The logger.
- The configuration.
- The endpoint key.
- The password key.
-
- Fail to establish REDIS connection
- Fail to establish REDIS connection
-
-
-
- Creates the consumer group if not exists asynchronous.
-
- The database.
- The event source key.
- The consumer group.
- The logger.
-
-
-
-
- Adds standard open-telemetry tags (for redis).
-
- The meta.
- The activity.
-
-
-
diff --git a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/icon.png b/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/icon.png
deleted file mode 100644
index d6811ad8..00000000
Binary files a/Channels/REDIS/Weknow.EventSource.Backbone.Channels.RedisProvider.Common/icon.png and /dev/null differ
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/EventSourcing.Backbone.Channels.S3StoreConsumerProvider.csproj b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/EventSourcing.Backbone.Channels.S3StoreConsumerProvider.csproj
new file mode 100644
index 00000000..867c07d3
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/EventSourcing.Backbone.Channels.S3StoreConsumerProvider.csproj
@@ -0,0 +1,25 @@
+
+
+
+ README.md
+
+
+
+
+ True
+ \
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs
similarity index 91%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs
index f5c16e98..fda173c7 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategy.cs
@@ -1,14 +1,13 @@
-using System.Collections.Immutable;
-using System.Text.Json;
+using System.Text.Json;
-using Microsoft.Extensions.Logging;
+using EventSourcing.Backbone.Channels;
-using Weknow.EventSource.Backbone.Channels;
+using Microsoft.Extensions.Logging;
-using static Weknow.EventSource.Backbone.EventSourceConstants;
+using static EventSourcing.Backbone.EventSourceConstants;
-namespace Weknow.EventSource.Backbone
+namespace EventSourcing.Backbone
{
///
/// Responsible to load information from S3 storage.
@@ -52,6 +51,11 @@ public S3ConsumerStorageStrategy(
#endregion // ctor
+ ///
+ /// Gets the name of the storage provider.
+ ///
+ public string Name { get; } = "S3";
+
///
/// Load the bucket information.
///
@@ -70,8 +74,6 @@ async ValueTask IConsumerStorageStrategy.LoadBucketAsync(
                    Func<string, string> getProperty,
CancellationToken cancellation)
{
- string id = meta.MessageId;
- var lookup = ImmutableDictionary.CreateRange(prevBucket);
string json = getProperty($"{Constants.PROVIDER_ID}~{type}");
var keyPathPairs = JsonSerializer.Deserialize[]>(
json, SerializerOptionsWithIndent) ??
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs
new file mode 100644
index 00000000..0ba31042
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs
@@ -0,0 +1,106 @@
+using Amazon.S3;
+
+using EventSourcing.Backbone.Building;
+using EventSourcing.Backbone.Channels;
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Extension methods for S3 storage strategy.
+ ///
+ public static class S3ConsumerStorageStrategyExtension
+ {
+ ///
+ /// Adds the S3 storage strategy.
+ ///
+ /// The builder.
+ /// The options.
+ /// Type of the target.
+        /// Either the access key or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// Either the secret or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// Either the region or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// if set to true, looks for the access key, secret, and region in the environment variables.
+ ///
+ public static IConsumerStoreStrategyBuilder AddS3Storage(
+ this IConsumerStoreStrategyBuilder builder,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All,
+ string envAccessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
+ string envSecretKey = "S3_EVENT_SOURCE_SECRET",
+ string envRegion = "S3_EVENT_SOURCE_REGION",
+ bool fromEnvironment = true)
+ {
+ var result = builder.AddStorageStrategyFactory(Local, targetType);
+
+ ValueTask Local(ILogger logger)
+ {
+ var factory = S3RepositoryFactory.Create(logger, envAccessKey, envSecretKey, envRegion, fromEnvironment);
+ var repo = factory.Get(options);
+ var strategy = new S3ConsumerStorageStrategy(repo);
+ return strategy.ToValueTask();
+ }
+ return result;
+ }
+
+ ///
+ /// Adds the S3 storage strategy.
+ ///
+ /// The builder.
+ ///
+ /// S3 client.
+ /// Learn how to setup an AWS client: https://codewithmukesh.com/blog/aws-credentials-for-dotnet-applications/
+ ///
+ /// The options.
+ /// Type of the target.
+ ///
+ public static IConsumerStoreStrategyBuilder AddS3Storage(
+ this IConsumerStoreStrategyBuilder builder,
+ IAmazonS3 client,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All)
+ {
+ var result = builder.AddStorageStrategyFactory(Local, targetType);
+
+ ValueTask Local(ILogger logger)
+ {
+ var factory = S3RepositoryFactory.Create(logger, client);
+ var repo = factory.Get(options);
+ var strategy = new S3ConsumerStorageStrategy(repo);
+ return strategy.ToValueTask();
+ }
+ return result;
+ }
+
+ ///
+ /// Adds the S3 storage strategy.
+        /// Will resolve IAmazonS3 from the dependency-injection container when available.
+        /// See the following article for more details on how you can register Amazon credentials:
+ /// https://codewithmukesh.com/blog/aws-credentials-for-dotnet-applications/
+ ///
+ /// The builder.
+ /// The options.
+ /// Type of the target.
+ ///
+ public static IConsumerStoreStrategyBuilder ResolveS3Storage(
+ this IConsumerIocStoreStrategyBuilder builder,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All)
+ {
+ ILogger? logger = builder.ServiceProvider.GetService>();
+            IAmazonS3? s3Client = builder.ServiceProvider.GetService<IAmazonS3>();
+ if (s3Client != null)
+ {
+ var injectionResult = builder.AddS3Storage(s3Client, options, targetType);
+ logger?.LogInformation("Consumer, Resolving AWS S3 via IAmazonS3 injection (might be via profile)");
+ return injectionResult;
+ }
+ logger?.LogInformation("Consumer, Resolving AWS S3 via environment variable");
+ var envVarResult = builder.AddS3Storage(options, targetType);
+ return envVarResult;
+ }
+ }
+}
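
The consumer-side extension above registers the S3 storage strategy on an IConsumerStoreStrategyBuilder. A minimal wiring sketch follows, using only the members defined in this file; the builder instance itself and the bucket name are illustrative placeholders not confirmed by this diff.

```csharp
using EventSourcing.Backbone;
using EventSourcing.Backbone.Building;

static class S3ConsumerWiringSketch
{
    // `consumerBuilder` stands for whatever IConsumerStoreStrategyBuilder instance the
    // application already has; how it is created is not shown in this diff.
    public static IConsumerStoreStrategyBuilder WireS3(IConsumerStoreStrategyBuilder consumerBuilder)
    {
        // With the defaults, credentials are read from the S3_EVENT_SOURCE_ACCESS_KEY /
        // S3_EVENT_SOURCE_SECRET / S3_EVENT_SOURCE_REGION environment variables.
        return consumerBuilder.AddS3Storage(
            new S3Options { Bucket = "my-events" },   // hypothetical bucket name
            EventBucketCategories.All);
    }
}
```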
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/icon.png b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreConsumerProvider/icon.png differ
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/EventSourcing.Backbone.Channels.S3StoreProducerProvider.csproj b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/EventSourcing.Backbone.Channels.S3StoreProducerProvider.csproj
new file mode 100644
index 00000000..a28a0266
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/EventSourcing.Backbone.Channels.S3StoreProducerProvider.csproj
@@ -0,0 +1,24 @@
+
+
+
+ README.md
+
+
+
+
+ True
+ \
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs
similarity index 90%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs
index 58dd3df1..c2fc1530 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategy.cs
@@ -1,14 +1,14 @@
using System.Collections.Immutable;
using System.Text.Json;
-using Microsoft.Extensions.Logging;
+using EventSourcing.Backbone.Channels;
-using Weknow.EventSource.Backbone.Channels;
+using Microsoft.Extensions.Logging;
-using static Weknow.EventSource.Backbone.EventSourceConstants;
+using static EventSourcing.Backbone.EventSourceConstants;
-namespace Weknow.EventSource.Backbone
+namespace EventSourcing.Backbone
{
///
/// Responsible to save information to S3 storage.
@@ -53,6 +53,11 @@ public S3ProducerStorageStrategy(
#endregion // ctor
+ ///
+ /// Gets the name of the storage provider.
+ ///
+ public string Name { get; } = "S3";
+
///
/// Saves the bucket information.
///
@@ -73,7 +78,7 @@ async ValueTask> IProducerStorageStrategy.S
{
var date = DateTime.UtcNow;
int index = Interlocked.Increment(ref _index);
- string basePath = $"{meta.Partition}/{meta.Shard}/{date:yyyy-MM-dd/HH:mm}/{meta.Operation}/{id}/{index}/{type}";
+ string basePath = $"{meta.Uri}/{date:yyyy-MM-dd/HH:mm}/{meta.Operation}/{id}/{index}/{type}";
var tasks = bucket.Select(SaveAsync);
var propKeyToS3Key = await Task.WhenAll(tasks);
string json = JsonSerializer.Serialize(propKeyToS3Key, SerializerOptionsWithIndent);
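
Note that the producer's storage key no longer embeds the partition/shard pair; it is derived from meta.Uri. A hedged illustration of the resulting layout, with made-up metadata values:

```csharp
// Illustration only -- every value below is invented to show the shape of
// $"{meta.Uri}/{date:yyyy-MM-dd/HH:mm}/{meta.Operation}/{id}/{index}/{type}"
// e.g. "orders/2024-01-15/09:42/OrderPlaced/order-id-123/7/Segments"
```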
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs
new file mode 100644
index 00000000..c0e4be9b
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs
@@ -0,0 +1,110 @@
+using Amazon.S3;
+
+using EventSourcing.Backbone.Channels;
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+
+namespace EventSourcing.Backbone
+{
+ ///
+ /// Extension methods for S3 storage strategy.
+ ///
+ public static class S3ProducerStorageStrategyExtension
+ {
+ ///
+ /// Adds the S3 storage strategy.
+ ///
+ /// The builder.
+ /// The options.
+ /// Type of the target.
+        /// The filter determining which keys in the bucket will be stored into this storage.
+        /// Either the access key or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// Either the secret or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// Either the region or the environment variable holding it (depending on the fromEnvironment parameter).
+        /// if set to true, looks for the access key, secret, and region in the environment variables.
+ ///
+ public static IProducerStoreStrategyBuilder AddS3Storage(
+ this IProducerStoreStrategyBuilder builder,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All,
+ Predicate? filter = null,
+ string envAccessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
+ string envSecretKey = "S3_EVENT_SOURCE_SECRET",
+ string envRegion = "S3_EVENT_SOURCE_REGION",
+ bool fromEnvironment = true)
+ {
+ var result = builder.AddStorageStrategy(Local, targetType, filter);
+
+ ValueTask Local(ILogger logger)
+ {
+ var factory = S3RepositoryFactory.Create(logger, envAccessKey, envSecretKey, envRegion, fromEnvironment);
+ var repo = factory.Get(options);
+ var strategy = new S3ProducerStorageStrategy(repo);
+ return strategy.ToValueTask();
+ }
+
+ return result;
+ }
+
+ ///
+ /// Adds the S3 storage strategy.
+ ///
+ /// The builder.
+ ///
+ /// S3 client.
+ /// Learn how to setup an AWS client: https://codewithmukesh.com/blog/aws-credentials-for-dotnet-applications/
+ ///
+ /// The options.
+ /// Type of the target.
+        /// The filter determining which keys in the bucket will be stored into this storage.
+ ///
+ public static IProducerStoreStrategyBuilder AddS3Storage(
+ this IProducerStoreStrategyBuilder builder,
+ IAmazonS3 client,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All,
+ Predicate? filter = null)
+ {
+ var result = builder.AddStorageStrategy(Local, targetType, filter);
+
+ ValueTask Local(ILogger logger)
+ {
+ var factory = S3RepositoryFactory.Create(logger, client);
+ var repo = factory.Get(options);
+ var strategy = new S3ProducerStorageStrategy(repo);
+ return strategy.ToValueTask();
+ }
+
+ return result;
+ }
+
+ ///
+ /// Adds the S3 storage strategy.
+ ///
+ /// The builder.
+ /// The options.
+ /// Type of the target.
+        /// The filter determining which keys in the bucket will be stored into this storage.
+ ///
+ public static IProducerStoreStrategyBuilder ResolveS3Storage(
+ this IProducerIocStoreStrategyBuilder builder,
+ S3Options options = default,
+ EventBucketCategories targetType = EventBucketCategories.All,
+ Predicate? filter = null)
+ {
+ ILogger? logger = builder.ServiceProvider.GetService>();
+            IAmazonS3? s3Client = builder.ServiceProvider.GetService<IAmazonS3>();
+
+ if (s3Client != null)
+ {
+ IProducerStoreStrategyBuilder injectionResult = builder.AddS3Storage(s3Client, options, targetType, filter);
+ logger?.LogInformation("Producer, Resolving AWS S3 via IAmazonS3 injection (might be via profile)");
+ return injectionResult;
+ }
+ logger?.LogInformation("Producer, Resolving AWS S3 via environment variable");
+ IProducerStoreStrategyBuilder envVarResult = builder.AddS3Storage(options, targetType, filter);
+ return envVarResult;
+ }
+ }
+}
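
The producer-side overload that accepts an IAmazonS3 client can be wired as in the sketch below. Only the members defined in this file are used; the builder instance, the client, and the base path are assumed placeholders.

```csharp
using Amazon.S3;
using EventSourcing.Backbone;

static class S3ProducerWiringSketch
{
    // `producerBuilder` and `s3Client` are assumed to exist already (e.g., the client
    // registered through the AWS SDK's dependency-injection integration).
    public static IProducerStoreStrategyBuilder WireS3(
        IProducerStoreStrategyBuilder producerBuilder,
        IAmazonS3 s3Client)
    {
        return producerBuilder.AddS3Storage(
            s3Client,
            new S3Options { BasePath = "events" },    // hypothetical base path
            EventBucketCategories.All);
    }
}
```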
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/icon.png b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProducerProvider/icon.png differ
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs
similarity index 81%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs
index 4e893828..31b1a321 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/BlobResponse.cs
@@ -1,35 +1,35 @@
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
///
/// Response structure
///
-    public class BlobResponse : IEquatable<BlobResponse>
+    public sealed class BlobResponse : IEquatable<BlobResponse>
{
#region Ctor
///
/// Prevents a default instance of the class from being created.
///
+#pragma warning disable S1133 // Deprecated code should be removed
[Obsolete("Use other constructors (this one exists to enable de-serialization)", true)]
+#pragma warning restore S1133 // Deprecated code should be removed
private BlobResponse() { }
///
/// Create request instance.
///
/// The blob key.
- /// The partition.
/// The e tag.
/// The content version.
/// Name of the file.
+ ///
public BlobResponse(
string key,
- string partition,
string eTag,
string contentVersion,
string? fileName = null)
{
_key = key;
- _partition = partition;
_eTag = eTag;
_contentVersion = contentVersion;
_fileName = fileName;
@@ -46,27 +46,14 @@ public BlobResponse(
public string Key
{
get => _key;
+#pragma warning disable S1133 // Deprecated code should be removed
[Obsolete("Exposed for the serializer", true)]
+#pragma warning restore S1133 // Deprecated code should be removed
set => _key = value;
}
#endregion Key
- #region Partition
-
- private string _partition = string.Empty;
- ///
- /// Gets or sets the partition.
- ///
- public string Partition
- {
- get => _partition;
- [Obsolete("Exposed for the serializer", true)]
- set => _partition = value;
- }
-
- #endregion Partition
-
#region FileName
private string? _fileName = string.Empty;
@@ -76,7 +63,9 @@ public string Partition
public string? FileName
{
get => _fileName;
+#pragma warning disable S1133 // Deprecated code should be removed
[Obsolete("Exposed for the serializer", true)]
+#pragma warning restore S1133 // Deprecated code should be removed
set => _fileName = value;
}
@@ -91,7 +80,9 @@ public string? FileName
public string ETag
{
get => _eTag;
+#pragma warning disable S1133 // Deprecated code should be removed
[Obsolete("Exposed for the serializer", true)]
+#pragma warning restore S1133 // Deprecated code should be removed
set => _eTag = value;
}
@@ -106,7 +97,9 @@ public string ETag
public string ContentVersion
{
get => _contentVersion;
+#pragma warning disable S1133 // Deprecated code should be removed
[Obsolete("Exposed for the serializer", true)]
+#pragma warning restore S1133 // Deprecated code should be removed
set => _contentVersion = value;
}
@@ -138,7 +131,6 @@ public bool Equals(BlobResponse? other)
return other != null &&
_key == other._key &&
_fileName == other._fileName &&
- _partition == other._partition &&
_eTag == other._eTag &&
_contentVersion == other._contentVersion;
}
@@ -149,10 +141,17 @@ public bool Equals(BlobResponse? other)
///
/// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table.
///
+#pragma warning disable S2328 // "GetHashCode" should not reference mutable fields
public override int GetHashCode()
{
- return HashCode.Combine(_key, _fileName, _partition, _eTag, _contentVersion);
+ return HashCode.Combine(
+ _key,
+ _fileName,
+ _eTag,
+ _contentVersion);
}
+#pragma warning restore S2328 // "GetHashCode" should not reference mutable fields
+
///
/// Implements the operator ==.
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Constants.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/Constants.cs
similarity index 75%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Constants.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/Constants.cs
index 2e101a17..a6f4ac4c 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Constants.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/Constants.cs
@@ -1,4 +1,4 @@
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
///
/// Constants
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/EventSourcing.Backbone.Channels.S3StoreProvider.Common.csproj b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/EventSourcing.Backbone.Channels.S3StoreProvider.Common.csproj
new file mode 100644
index 00000000..2ff1b791
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/EventSourcing.Backbone.Channels.S3StoreProvider.Common.csproj
@@ -0,0 +1,25 @@
+
+
+
+ README.md
+
+
+
+
+ True
+ \
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs
similarity index 98%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs
index 1df734dc..2a527832 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3Repository.cs
@@ -1,7 +1,7 @@
using System.Collections.Immutable;
using System.Text.Json;
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
///
/// The S3 repository contract.
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs
similarity index 66%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs
index 8da82527..7260ccbb 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/IS3RepositoryFactory.cs
@@ -1,4 +1,4 @@
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
public interface IS3RepositoryFactory
{
@@ -7,6 +7,6 @@ public interface IS3RepositoryFactory
///
///
///
- S3Repository Get(S3Options options = default);
+ IS3Repository Get(S3Options options = default);
}
}
\ No newline at end of file
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs
similarity index 82%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs
index 02fd4e83..a1fe9441 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3EnvironmentConvention.cs
@@ -1,4 +1,4 @@
-namespace Weknow.EventSource.Backbone
+namespace EventSourcing.Backbone
{
///
/// Environment convention's options
@@ -10,7 +10,7 @@ public enum S3EnvironmentConvention
///
None,
///
- /// Environment as bucket perfix
+ /// Environment as bucket prefix
///
BucketPrefix,
///
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Options.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Options.cs
similarity index 89%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Options.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Options.cs
index 47156c18..077f5e46 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Options.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Options.cs
@@ -1,4 +1,4 @@
-namespace Weknow.EventSource.Backbone
+namespace EventSourcing.Backbone
{
///
/// S3 provider options
@@ -9,7 +9,7 @@ public S3Options()
{
Bucket = null;
BasePath = null;
- EnvironmentConvension = S3EnvironmentConvention.None;
+ EnvironmentConvension = S3EnvironmentConvention.BucketPrefix;
}
///
@@ -17,6 +17,7 @@ public S3Options()
///
public string? Bucket { get; init; }
+
///
/// Base path
///
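
Because the default EnvironmentConvension changes here from None to BucketPrefix, the effective bucket name now gets an environment prefix (see GetBucket later in this diff). A minimal sketch of opting out, using only the members shown above; the bucket name is hypothetical:

```csharp
// Keep the bucket name as-is instead of prefixing it with the environment.
// Note: "EnvironmentConvension" is spelled this way in the source.
var options = new S3Options
{
    Bucket = "my-events",                                  // hypothetical bucket
    EnvironmentConvension = S3EnvironmentConvention.None,
};
```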
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs
similarity index 87%
rename from Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs
rename to Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs
index 16012089..094fe00b 100644
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3Repository.cs
@@ -7,15 +7,15 @@
using Microsoft.Extensions.Logging;
-using static Weknow.EventSource.Backbone.EventSourceConstants;
+using static EventSourcing.Backbone.EventSourceConstants;
-namespace Weknow.EventSource.Backbone.Channels
+namespace EventSourcing.Backbone.Channels
{
///
/// Abstract S3 operations
///
- public sealed class S3Repository : IS3Repository, IDisposable
+ internal sealed class S3Repository : IS3Repository, IDisposable
{
private static readonly string BUCKET =
Environment.GetEnvironmentVariable("S3_EVENT_SOURCE_BUCKET")
@@ -24,11 +24,10 @@ public sealed class S3Repository : IS3Repository, IDisposable
private readonly string _bucket;
private readonly ILogger _logger;
private readonly string? _basePath;
- private readonly AmazonS3Client _client;
+ private readonly IAmazonS3 _client;
        private static readonly List<Tag> EMPTY_TAGS = new List<Tag>();
private int _disposeCount = 0;
private readonly S3EnvironmentConvention _environmentConvension;
- private const StringComparison STRING_COMPARISON = StringComparison.OrdinalIgnoreCase;
private readonly bool _dryRun;
#region Ctor
@@ -36,11 +35,14 @@ public sealed class S3Repository : IS3Repository, IDisposable
///
/// Initializes a new instance.
///
- /// S3 client.
+ ///
+ /// S3 client.
+ /// Learn how to setup an AWS client: https://codewithmukesh.com/blog/aws-credentials-for-dotnet-applications/
+ ///
/// The logger.
/// The s3 options.
public S3Repository(
- AmazonS3Client client,
+ IAmazonS3 client,
ILogger logger,
S3Options options = default)
{
@@ -154,7 +156,7 @@ public async ValueTask GetAsync(Env env, string id, CancellationToken canc
if (response == null)
{
- throw new NullReferenceException("Failed to deserialize industries");
+ throw new EventSourcingException("Failed to deserialize industries");
}
#endregion // Validation
@@ -205,12 +207,12 @@ public async ValueTask GetStreamAsync(Env env, string id, CancellationTo
if (res == null)
{
- throw new Exception($"S3 key [{key}] not found. bucket = {bucketName}");
+ throw new EventSourcingException($"S3 key [{key}] not found. bucket = {bucketName}");
}
if (res.HttpStatusCode >= HttpStatusCode.Ambiguous)
{
- throw new Exception($"Failed to get blob [{res.HttpStatusCode}]");
+ throw new EventSourcingException($"Failed to get blob [{res.HttpStatusCode}]");
}
#endregion // Validation
@@ -314,8 +316,11 @@ public async ValueTask SaveAsync(
string key = GetKey(env, id);
try
{
- var date = DateTime.UtcNow;
- //tags = tags.Add("month", date.ToString("yyyy-MM"));
+
+#pragma warning disable S125 // Sections of code should not be commented out
+ // var date = DateTime.UtcNow;
+ // tags = tags.Add("month", date.ToString("yyyy-MM"));
+#pragma warning restore S125 // Sections of code should not be commented out
var s3Request = new PutObjectRequest
{
@@ -325,6 +330,7 @@ public async ValueTask SaveAsync(
ContentType = mediaType,
TagSet = tags?.Select(m => new Tag { Key = m.Key, Value = m.Value })?.ToList() ?? EMPTY_TAGS,
};
+
// s3Request.Headers.ExpiresUtc = DateTime.Now.AddHours(2); // cache expiration
if (metadata != null)
@@ -339,7 +345,7 @@ public async ValueTask SaveAsync(
if (_dryRun)
{
- return new BlobResponse(key, _bucket, string.Empty, string.Empty);
+ return new BlobResponse(key, string.Empty, string.Empty);
}
#endregion // if (_dryRun) return ...
@@ -350,16 +356,18 @@ public async ValueTask SaveAsync(
if (res.HttpStatusCode >= HttpStatusCode.Ambiguous)
{
- throw new Exception($"Failed to save blob [{res.HttpStatusCode}]");
+ throw new EventSourcingException($"Failed to save blob [{res.HttpStatusCode}]");
}
#endregion // Validation
- BlobResponse response = new BlobResponse(key, _bucket, res.ETag, res.VersionId);
+ BlobResponse response = new BlobResponse(key, res.ETag, res.VersionId);
return response;
}
#region Exception Handling
+#pragma warning disable S2486 // Generic exceptions should not be ignored
+#pragma warning disable S108 // Nested blocks of code should not be left empty
catch (AmazonS3Exception e)
{
string json = "";
@@ -369,9 +377,15 @@ public async ValueTask SaveAsync(
}
catch { }
_logger.LogError(e.FormatLazy(),
- "AWS-S3 Failed to write: {payload}, {env}, {id}, {bucket}, {key}", json, env, id, bucket, key);
- string msg = $"AWS-S3 Failed to write: {env}, {id}, {bucket}, {key}";
- throw new ApplicationException(msg, e);
+ """
+ AWS-S3 Failed to write: {payload}, {env}, {id}, {bucket}, {key}
+                    Make sure that the bucket exists & the credentials are set correctly.
+ """, json, env, id, bucket, key);
+ string msg = $"""
+ AWS-S3 Failed to write: {env}, {id}, {bucket}, {key}
+                    Make sure that the bucket exists & the credentials are set correctly.
+ """;
+ throw new EventSourcingException(msg, e);
}
catch (Exception e)
{
@@ -384,8 +398,10 @@ public async ValueTask SaveAsync(
_logger.LogError(e.FormatLazy(),
"S3 writing Failed: {payload}, {env}, {id}, {bucket}, {key}", json, env, id, bucket, key);
string msg = $"S3 writing Failed: {env}, {id}, {bucket}, {key}";
- throw new ApplicationException(msg, e);
+ throw new EventSourcingException(msg, e);
}
+#pragma warning restore S2486
+#pragma warning restore S108
#endregion // Exception Handling
}
@@ -401,6 +417,8 @@ public async ValueTask SaveAsync(
///
private string GetBucket(Env env)
{
+ if (string.IsNullOrEmpty(env))
+ return _bucket;
var bucket = _environmentConvension switch
{
S3EnvironmentConvention.BucketPrefix => $"{env.Format()}.{_bucket}",
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs
new file mode 100644
index 00000000..6b6bd9ab
--- /dev/null
+++ b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs
@@ -0,0 +1,147 @@
+using System.Collections.Concurrent;
+
+using Amazon;
+using Amazon.S3;
+
+using Microsoft.Extensions.Logging;
+
+
+namespace EventSourcing.Backbone.Channels
+{
+ ///
+ /// Abstract S3 operations
+ ///
+ public sealed class S3RepositoryFactory : IS3RepositoryFactory
+ {
+ private readonly ILogger _logger;
+ private readonly IAmazonS3 _client;
+        private readonly ConcurrentDictionary<S3Options, S3Repository> _cache = new ConcurrentDictionary<S3Options, S3Repository>();
+
+ #region CreateClient
+
+ ///
+ /// Creates the S3 client.
+ ///
+        /// The access key or the environment variable which holds it.
+        /// The secret or the environment variable which holds it.
+        /// The region or the environment variable which holds it.
+        /// if set to true, will try to resolve the values from environment variables.
+ ///
+ public static IAmazonS3 CreateClient(
+ string accessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
+ string secret = "S3_EVENT_SOURCE_SECRET",
+ string region = "S3_EVENT_SOURCE_REGION",
+ bool fromEnvironment = true)
+ {
+ accessKey =
+ fromEnvironment
+ ? Environment.GetEnvironmentVariable(accessKey) ?? accessKey
+ : accessKey;
+ secret =
+ fromEnvironment
+ ? Environment.GetEnvironmentVariable(secret) ?? secret
+ : secret;
+ string? regionKey =
+ fromEnvironment
+ ? Environment.GetEnvironmentVariable(region) ?? region
+ : region;
+
+ RegionEndpoint rgnKey = (!string.IsNullOrEmpty(regionKey))
+ ? RegionEndpoint.GetBySystemName(regionKey)
+ : RegionEndpoint.USEast2;
+ var client = new AmazonS3Client(accessKey, secret, rgnKey);
+ return client;
+ }
+
+ #endregion // CreateClient
+
+ #region Create
+
+ ///
+ /// Creates the specified logger.
+ ///
+ /// The logger.
+        /// The access key or the environment variable which holds it.
+        /// The secret or the environment variable which holds it.
+        /// The region or the environment variable which holds it.
+        /// if set to true, will try to resolve the values from environment variables.
+ ///
+ public static IS3RepositoryFactory Create(ILogger logger,
+ string accessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
+ string secret = "S3_EVENT_SOURCE_SECRET",
+ string region = "S3_EVENT_SOURCE_REGION",
+ bool fromEnvironment = true)
+ {
+ var client = CreateClient(accessKey, secret, region, fromEnvironment);
+ return new S3RepositoryFactory(logger, client);
+ }
+
+ ///
+ /// Creates the specified logger.
+ ///
+ /// The logger.
+ ///
+ /// S3 client.
+ /// Learn how to setup an AWS client: https://codewithmukesh.com/blog/aws-credentials-for-dotnet-applications/
+ ///
+ ///
+ public static IS3RepositoryFactory Create(ILogger logger,
+ IAmazonS3 client)
+ {
+ return new S3RepositoryFactory(logger, client);
+ }
+
+ #endregion // Create
+
+ #region Ctor
+
+ ///
+ /// Initializes a new instance.
+ ///
+ /// The logger.
+ /// The client.
+ public S3RepositoryFactory(
+ ILogger logger,
+ IAmazonS3 client)
+ {
+ _logger = logger;
+
+ _client = client;
+ }
+
+ #endregion // Ctor
+
+ #region Get
+
+ ///
+ /// Get repository instance.
+ ///
+ /// The options.
+ ///
+ IS3Repository IS3RepositoryFactory.Get(S3Options options)
+ {
+
+ var repo = _cache.GetOrAdd(options, CreateInternal);
+ repo.AddReference();
+ return repo;
+ }
+
+ #endregion // Get
+
+ #region CreateInternal
+
+ ///
+ /// Creates repository.
+ ///
+ /// The options.
+ ///
+ private S3Repository CreateInternal(
+ S3Options options = default)
+ {
+ return new S3Repository(_client, _logger, options);
+ }
+
+ #endregion // CreateInternal
+ }
+
+}
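
A short sketch of using the factory above directly, based only on the members defined in this file; the NullLogger instance and bucket name are illustrative stand-ins rather than anything this diff prescribes.

```csharp
using Amazon.S3;
using EventSourcing.Backbone;
using EventSourcing.Backbone.Channels;
using Microsoft.Extensions.Logging.Abstractions;

// Resolves credentials from the S3_EVENT_SOURCE_* environment variables (the defaults).
IAmazonS3 client = S3RepositoryFactory.CreateClient();

// NullLogger.Instance stands in for a real ILogger.
IS3RepositoryFactory factory = S3RepositoryFactory.Create(NullLogger.Instance, client);

// Repositories are cached per S3Options; Get may return a shared, reference-counted instance.
IS3Repository repository = factory.Get(new S3Options { Bucket = "my-events" });
```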
diff --git a/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/icon.png b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/icon.png
new file mode 100644
index 00000000..17d68338
Binary files /dev/null and b/Channels/S3/EventSourcing.Backbone.Channels.S3StoreProvider.Common/icon.png differ
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs
deleted file mode 100644
index 02e8fc18..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/S3ConsumerStorageStrategyExtension.cs
+++ /dev/null
@@ -1,41 +0,0 @@
-
-using Microsoft.Extensions.Logging;
-
-using Weknow.EventSource.Backbone.Building;
-using Weknow.EventSource.Backbone.Channels;
-
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Extension methods for S3 storage strategy.
- ///
- public static class S3ConsumerStorageStrategyExtension
- {
- ///
- /// Adds the S3 storage strategy.
- ///
- /// The builder.
- /// The options.
- /// Type of the target.
- ///
- public static IConsumerStoreStrategyBuilder AddS3Strategy(
- this IConsumerStoreStrategyBuilder builder,
- S3Options options = default,
- EventBucketCategories targetType = EventBucketCategories.All)
- {
- var result = builder.AddStorageStrategyFactory(Local, targetType);
-
- ValueTask Local(ILogger logger)
- {
- var factory = S3RepositoryFactory.Create(logger);
- var repo = factory.Get(options);
- var strategy = new S3ConsumerStorageStrategy(repo);
- return strategy.ToValueTask();
- }
- return result;
- }
-
-
- }
-}
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider.csproj b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider.csproj
deleted file mode 100644
index a807d788..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider.csproj
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProviderr.xml b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProviderr.xml
deleted file mode 100644
index f28cfed5..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProviderr.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider
-
-
-
-
- Responsible to load information from S3 storage.
- The information can be either Segmentation or Interception.
- When adding it via the builder it can be arrange in a chain in order of having
- 'Chain of Responsibility' for saving different parts into different storage (For example GDPR's PII).
- Alternative, chain can serve as a cache layer.
-
-
-
-
- Initializes a new instance.
-
- S3 repository.
- Use S3Factory in order to create it (will create one if missing).
- S3Factory will read credentials from the following environment variables: "S3_ACCESS_KEY", "S3_SECRET", "S3_REGION".
-
-
-
- Initializes a new instance.
-
- The logger.
- The bucket.
- The base path.
- The repository's factory.
-
-
-
- Load the bucket information.
-
- The meta fetch provider.
- The current bucket (previous item in the chain).
- The type of the storage.
- The get property.
- The cancellation.
-
- Either Segments or Interceptions.
-
-
-
-
- Extension methods for S3 storage strategy.
-
-
-
-
- Adds the S3 storage strategy.
-
- The builder.
- The bucket.
- The base path.
- Type of the target.
-
-
-
-
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/icon.png b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/icon.png
deleted file mode 100644
index d6811ad8..00000000
Binary files a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreConsumerProvider/icon.png and /dev/null differ
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs
deleted file mode 100644
index 25c77337..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/S3ProducerStorageStrategyExtension.cs
+++ /dev/null
@@ -1,49 +0,0 @@
-
-using Microsoft.Extensions.Logging;
-
-using Weknow.EventSource.Backbone.Channels;
-
-
-namespace Weknow.EventSource.Backbone
-{
- ///
- /// Extension methods for S3 storage strategy.
- ///
- public static class S3ProducerStorageStrategyExtension
- {
- ///
- /// Adds the S3 storage strategy.
- ///
- /// The builder.
- /// The options.
- /// Type of the target.
- /// The filter of which keys in the bucket will be store into this storage.
- /// The environment variable of access key.
- /// The environment variable of secret key.
- /// The environment variable of region.
- ///
- public static IProducerStoreStrategyBuilder AddS3Strategy(
- this IProducerStoreStrategyBuilder builder,
- S3Options options = default,
- EventBucketCategories targetType = EventBucketCategories.All,
- Predicate? filter = null,
- string envAccessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
- string envSecretKey = "S3_EVENT_SOURCE_SECRET",
- string envRegion = "S3_EVENT_SOURCE_REGION")
- {
- var result = builder.AddStorageStrategy(Local, targetType, filter);
-
- ValueTask Local(ILogger logger)
- {
- var factory = S3RepositoryFactory.Create(logger, envAccessKey, envSecretKey, envRegion);
- var repo = factory.Get(options);
- var strategy = new S3ProducerStorageStrategy(repo);
- return strategy.ToValueTask();
- }
-
- return result;
- }
-
-
- }
-}
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.csproj b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.csproj
deleted file mode 100644
index 6ea2c899..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.csproj
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
-
-
-
-
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.xml b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.xml
deleted file mode 100644
index cb908e12..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider
-
-
-
-
- Responsible to save information to S3 storage.
- The information can be either Segmentation or Interception.
- When adding it via the builder it can be arrange in a chain in order of having
- 'Chain of Responsibility' for saving different parts into different storage (For example GDPR's PII).
- Alternative, chain can serve as a cache layer.
-
-
-
-
- Initializes a new instance.
-
- S3 repository.
- Use S3Factory in order to create it (will create one if missing).
- S3Factory will read credentials from the following environment variables: "S3_ACCESS_KEY", "S3_SECRET", "S3_REGION".
-
-
-
- Initializes a new instance.
-
- The logger.
- The bucket.
- The base path.
- The repository's factory.
-
-
-
- Saves the bucket information.
-
- The identifier.
- Either Segments or Interceptions.
- The type.
- The meta.
- The cancellation.
-
- Array of metadata entries which can be used by the consumer side storage strategy, in order to fetch the data.
-
-
-
-
- Extension methods for S3 storage strategy.
-
-
-
-
- Adds the S3 storage strategy.
-
- The builder.
- The bucket.
- The base path.
- Type of the target.
- The filter of which keys in the bucket will be store into this storage.
-
-
-
-
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/icon.png b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/icon.png
deleted file mode 100644
index d6811ad8..00000000
Binary files a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProducerProvider/icon.png and /dev/null differ
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs
deleted file mode 100644
index 6ce7c72d..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/S3RepositoryFactory.cs
+++ /dev/null
@@ -1,114 +0,0 @@
-using System.Collections.Concurrent;
-
-using Amazon;
-using Amazon.S3;
-
-using Microsoft.Extensions.Logging;
-
-
-namespace Weknow.EventSource.Backbone.Channels
-{
- ///
- /// Abstract S3 operations
- ///
- public sealed class S3RepositoryFactory : IS3RepositoryFactory
- {
- private readonly ILogger _logger;
- private readonly AmazonS3Client _client;
-        private readonly ConcurrentDictionary<S3Options, S3Repository> _cache = new ConcurrentDictionary<S3Options, S3Repository>();
-
- #region Create
-
- ///
- /// Creates the specified logger.
- ///
- /// The logger.
- /// The environment variable of access key.
- /// The environment variable of secret key.
- /// The environment variable of region.
- ///
- public static IS3RepositoryFactory Create(ILogger logger,
- string envAccessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
- string envSecretKey = "S3_EVENT_SOURCE_SECRET",
- string envRegion = "S3_EVENT_SOURCE_REGION") => new S3RepositoryFactory(logger, envAccessKey, envSecretKey, envRegion);
-
- #endregion // Create
-
- #region Ctor
-
- ///
- /// Initializes a new instance.
- ///
- /// The logger.
- public S3RepositoryFactory(
- ILogger logger) : this((ILogger)logger)
- {
- }
-
- ///
- /// Initializes a new instance.
- ///
- /// The logger.
- /// The environment variable of access key.
- /// The environment variable of secret key.
- /// The environment variable of region.
- public S3RepositoryFactory(
- ILogger logger,
- string envAccessKey = "S3_EVENT_SOURCE_ACCESS_KEY",
- string envSecretKey = "S3_EVENT_SOURCE_SECRET",
- string envRegion = "S3_EVENT_SOURCE_REGION")
- {
- _logger = logger;
-
- string accessKey =
- Environment.GetEnvironmentVariable(envAccessKey) ?? "";
- string secretKey =
- Environment.GetEnvironmentVariable(envSecretKey) ?? "";
- string? regionKey =
- Environment.GetEnvironmentVariable(envRegion);
- RegionEndpoint rgnKey = (!string.IsNullOrEmpty(regionKey))
- ? RegionEndpoint.GetBySystemName(regionKey)
- : RegionEndpoint.USEast2;
-
- _client = new AmazonS3Client(
- accessKey,
- secretKey,
- rgnKey);
- }
-
- #endregion // Ctor
-
- #region Get
-
- ///
- /// Get repository instance.
- ///
- /// The options.
- ///
- S3Repository IS3RepositoryFactory.Get(S3Options options)
- {
-
- var repo = _cache.GetOrAdd(options, CreateInternal);
- repo.AddReference();
- return repo;
- }
-
- #endregion // Get
-
- #region CreateInternal
-
- ///
- /// Creates repository.
- ///
- /// The options.
- ///
- private S3Repository CreateInternal(
- S3Options options = default)
- {
- return new S3Repository(_client, _logger, options);
- }
-
- #endregion // CreateInternal
- }
-
-}
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.csproj b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.csproj
deleted file mode 100644
index b0c1e45d..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.csproj
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.xml b/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.xml
deleted file mode 100644
index 8584bf01..00000000
--- a/Channels/S3/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common/Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common.xml
+++ /dev/null
@@ -1,265 +0,0 @@
-
-
-
- Weknow.EventSource.Backbone.Channels.S3StoreProvider.Common
-
-
-
-
- Response structure
-
-
-
-
- Prevents a default instance of the