When using Microsoft.SemanticKernel, it's important to centralize and reuse kernel setup logic rather than repeating configuration for each consumer or request. This avoids the overhead of reinitializing connectors and plugins. SemanticKernelCache supports this by providing a thread-safe, per-key singleton cache that lazily creates Kernel instances using customizable options. Kernels are disposed at application shutdown or manually if needed.
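Conceptually, the cache behaves like a per-key, lazily initialized dictionary of kernels. The sketch below only illustrates that general pattern; it is not the package's actual implementation, and the class and method names are invented for illustration:

using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;
using Microsoft.SemanticKernel;

// Illustrative only: a per-key, thread-safe, lazily initialized kernel cache.
public sealed class PerKeyKernelCacheSketch
{
    private readonly ConcurrentDictionary<string, Lazy<Task<Kernel>>> _kernels = new();

    public Task<Kernel> GetOrCreate(string key, Func<Task<Kernel>> factory)
    {
        // Lazy<T> ensures the factory runs at most once per key,
        // even when multiple callers request the same key concurrently.
        return _kernels.GetOrAdd(key, _ => new Lazy<Task<Kernel>>(factory)).Value;
    }
}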
Install the package via the .NET CLI:
dotnet add package Soenneker.SemanticKernel.Cache

In your Program.cs (or equivalent startup file), register the cache with the DI container:
using Soenneker.SemanticKernel.Cache;

public static async Task Main(string[] args)
{
    var builder = WebApplication.CreateBuilder(args);

    // Register SemanticKernelCache as a singleton service.
    builder.Services.AddSemanticKernelCacheAsSingleton();

    // Other configuration...
}

Inject ISemanticKernelCache into your classes and retrieve a Microsoft.SemanticKernel.Kernel instance by providing the required options:
using System;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Soenneker.SemanticKernel.Cache;

public class TestClass
{
    private readonly ISemanticKernelCache _semanticKernelCache;
    private readonly SemanticKernelOptions _options;

    public TestClass(ISemanticKernelCache semanticKernelCache)
    {
        _semanticKernelCache = semanticKernelCache;

        // Create the options object once. Replace these with your actual values.
        _options = new SemanticKernelOptions
        {
            ModelId = "deepseek-r1:32b",
            Endpoint = "http://localhost:11434",
            KernelFactory = (opts, ct) =>
            {
                IKernelBuilder builder = Kernel.CreateBuilder()
                    .AddOllamaChatCompletion(opts.ModelId, new Uri(opts.Endpoint));

                return ValueTask.FromResult(builder);
            }
        };
    }

    public async ValueTask<string> GetKernelResponse(string input, CancellationToken cancellationToken = default)
    {
        // Retrieve (or create) the kernel instance using a key (here, nameof(TestClass)).
        Kernel kernel = await _semanticKernelCache.Get(nameof(TestClass), _options, cancellationToken);

        // Retrieve the chat completion service from the kernel.
        var chatCompletionService = kernel.GetRequiredService<IChatCompletionService>();

        // Create a chat history and add the user's message.
        var history = new ChatHistory();
        history.AddUserMessage(input);

        // Request a chat completion using the chat service.
        var chatResult = await chatCompletionService.GetChatMessageContentAsync(history, kernel: kernel, cancellationToken: cancellationToken);

        // Return the chat result (or process it further as needed).
        return chatResult.ToString();
    }
}

The SemanticKernelOptions class includes an optional KernelFactory delegate. This allows you to override the default behavior (which uses the Azure Text Completion service) and create the kernel using a different connector or plugin. For example:
var openAiOptions = new SemanticKernelOptions
{
    ModelId = "openai-model-id",
    Endpoint = "https://api.openai.com/v1/",
    ApiKey = "your-openai-api-key",
    KernelFactory = (opts, ct) =>
    {
        // Build the kernel with the OpenAI chat completion connector.
        IKernelBuilder builder = Kernel.CreateBuilder()
            .AddOpenAIChatCompletion(opts.ModelId, opts.ApiKey);

        return ValueTask.FromResult(builder);
    },
    ConfigureKernelAsync = async kernel =>
    {
        // Optionally, import plugins or perform additional configuration.
        await ValueTask.CompletedTask;
    }
};
Kernel openAiKernel = await semanticKernelCache.Get("openaiKernel", openAiOptions);

This design makes it straightforward to support multiple types of Semantic Kernel configurations using the same caching mechanism.
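For instance, differently configured kernels can live side by side in the same cache as long as each uses its own key. A minimal sketch, assuming an ollamaOptions instance like the one built in TestClass above and an available cancellation token (both are placeholders here):

// Each key maps to its own lazily created, cached Kernel instance.
Kernel ollamaKernel = await semanticKernelCache.Get("ollamaKernel", ollamaOptions, cancellationToken);
Kernel openAiKernel = await semanticKernelCache.Get("openaiKernel", openAiOptions, cancellationToken);

// Requesting an existing key again returns the already-created kernel rather than building a new one.
Kernel cachedAgain = await semanticKernelCache.Get("openaiKernel", openAiOptions, cancellationToken);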
