LLamaSharp/LLama.SemanticKernel/ExtensionMethods.cs

using LLamaSharp.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using System;
using System.Collections.Generic;

namespace LLamaSharp.SemanticKernel;

internal static class ExtensionMethods
{
    /// <summary>
    /// Convert a Semantic Kernel ChatHistory to a LLamaSharp ChatHistory.
    /// </summary>
    /// <param name="chatHistory">The Semantic Kernel chat history to convert.</param>
    /// <returns>The equivalent LLamaSharp chat history.</returns>
    internal static global::LLama.Common.ChatHistory ToLLamaSharpChatHistory(this ChatHistory chatHistory)
    {
        if (chatHistory is null)
        {
            throw new ArgumentNullException(nameof(chatHistory));
        }

        var history = new global::LLama.Common.ChatHistory();

        foreach (var chat in chatHistory)
        {
            // Semantic Kernel role labels are lowercase ("user", "assistant", ...),
            // so parse case-insensitively and fall back to Unknown for unmapped roles.
            var role = Enum.TryParse<global::LLama.Common.AuthorRole>(chat.Role.Label, ignoreCase: true, out var _role)
                ? _role
                : global::LLama.Common.AuthorRole.Unknown;
            history.AddMessage(role, chat.Content);
        }

        return history;
    }
    /// <summary>
    /// Convert ChatRequestSettings to LLamaSharp InferenceParams.
    /// </summary>
    /// <param name="requestSettings">The Semantic Kernel request settings to convert.</param>
    /// <returns>The equivalent LLamaSharp inference parameters.</returns>
    internal static global::LLama.Common.InferenceParams ToLLamaSharpInferenceParams(this ChatRequestSettings requestSettings)
    {
        if (requestSettings is null)
        {
            throw new ArgumentNullException(nameof(requestSettings));
        }

        // Stop generation when the model starts to write the user's next turn.
        var antiPrompts = new List<string>(requestSettings.StopSequences) { AuthorRole.User.ToString() + ":" };

        return new global::LLama.Common.InferenceParams
        {
            Temperature = (float)requestSettings.Temperature,
            TopP = (float)requestSettings.TopP,
            PresencePenalty = (float)requestSettings.PresencePenalty,
            FrequencyPenalty = (float)requestSettings.FrequencyPenalty,
            AntiPrompts = antiPrompts,
            MaxTokens = requestSettings.MaxTokens ?? -1
        };
    }
}
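
// --- Example usage (a minimal sketch; not part of the original file) ---
// This illustrates how the two extension methods above bridge Semantic Kernel
// types to their LLamaSharp equivalents. The class and method names below are
// hypothetical and exist only for illustration.
internal static class ExtensionMethodsUsageExample
{
    internal static void Demo(ChatHistory skHistory, ChatRequestSettings skSettings)
    {
        // Convert the Semantic Kernel chat history into LLamaSharp's ChatHistory.
        var llamaHistory = skHistory.ToLLamaSharpChatHistory();

        // Map sampling options (temperature, top-p, penalties, stop sequences,
        // max tokens) onto LLamaSharp's InferenceParams.
        var inferParams = skSettings.ToLLamaSharpInferenceParams();

        // Both results can then be passed to a LLamaSharp executor or ChatSession
        // when generating a completion; discarded here to keep the sketch minimal.
        _ = (llamaHistory, inferParams);
    }
}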