Merge remote-tracking branch 'upstream/master' into RuntimeDetection

SignalRT 2023-11-11 09:09:03 +01:00
commit fb95bbb4e0
5 changed files with 95 additions and 20 deletions

View File

@@ -30,6 +30,7 @@
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Console" Version="7.0.0" />
<PackageReference Include="Microsoft.SemanticKernel" Version="1.0.0-beta1" />
<PackageReference Include="Spectre.Console" Version="0.47.0" />
</ItemGroup>
<ItemGroup>

View File

@@ -4,6 +4,27 @@
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<Version>0.7.1</Version>
<Authors>Xbotter</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<Copyright>MIT, SciSharp STACK $([System.DateTime]::UtcNow.ToString(yyyy))</Copyright>
<RepositoryUrl>https://github.com/SciSharp/LLamaSharp</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
<PackageTags>LLama, LLM, GPT, ChatGPT, kernel-memory, vector search, SciSharp</PackageTags>
<Description>
The integration of LLamaSharp and Microsoft kernel-memory. It makes it easy to add document search support to LLamaSharp model inference.
</Description>
<PackageReleaseNotes>
Support integration with kernel-memory
</PackageReleaseNotes>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageOutputPath>packages</PackageOutputPath>
<Platforms>AnyCPU;x64;Arm64</Platforms>
<PackageId>LLamaSharp.kernel-memory</PackageId>
<Configurations>Debug;Release;GPU</Configurations>
</PropertyGroup>
<ItemGroup>

View File

@@ -10,8 +10,8 @@
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<Version>0.6.2-beta1</Version>
<Authors>Tim Miller</Authors>
<Version>0.7.1</Version>
<Authors>Tim Miller, Xbotter</Authors>
<Company>SciSharp STACK</Company>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<Copyright>MIT, SciSharp STACK $([System.DateTime]::UtcNow.ToString(yyyy))</Copyright>
@@ -20,7 +20,7 @@
<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
<PackageTags>LLama, LLM, GPT, ChatGPT, semantic-kernel, SciSharp</PackageTags>
<Description>
The integration of LLamaSharp ans semantic-kernel.
The integration of LLamaSharp and Microsoft semantic-kernel.
</Description>
<PackageReleaseNotes>
Support integration with semantic-kernel

View File

@@ -1,11 +1,14 @@
using LLama.Abstractions;
using LLama.Common;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using static LLama.InteractiveExecutor;
namespace LLama
{
@@ -95,11 +98,11 @@ namespace LLama
Directory.CreateDirectory(path);
}
_executor.Context.SaveState(Path.Combine(path, _modelStateFilename));
if(Executor is StatelessExecutor)
if (Executor is StatelessExecutor)
{
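// A stateless executor keeps no inference state of its own, so there is nothing extra to persist here.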
}
else if(Executor is StatefulExecutorBase statefulExecutor)
else if (Executor is StatefulExecutorBase statefulExecutor)
{
statefulExecutor.SaveState(Path.Combine(path, _executorStateFilename));
}
@@ -135,46 +138,90 @@ namespace LLama
}
/// <summary>
/// Get the response from the LLama model. Note that prompt could not only be the preset words,
/// but also the question you want to ask.
/// Generates a response for a given user prompt and manages history state for the user.
/// This will always pass the whole history to the model. Don't pass a whole history
/// to this method, as the user prompt will be appended to the history of the current session.
/// If more control is needed, use the other overload of this method that accepts a ChatHistory object.
/// </summary>
/// <param name="prompt"></param>
/// <param name="inferenceParams"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <returns>Returns generated text of the assistant message.</returns>
public async IAsyncEnumerable<string> ChatAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
foreach(var inputTransform in InputTransformPipeline)
foreach (var inputTransform in InputTransformPipeline)
prompt = inputTransform.Transform(prompt);
History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.User, prompt).Messages);
History.Messages.Add(new ChatHistory.Message(AuthorRole.User, prompt));
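// For an interactive executor, the very first run (the prompt run) must receive the full transformed history;
// on later turns the executor's context already contains the conversation, so only the new user message is passed.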
if (_executor is InteractiveExecutor executor)
{
InteractiveExecutorState state = (InteractiveExecutorState)executor.GetStateData();
prompt = state.IsPromptRun
? HistoryTransform.HistoryToText(History)
: prompt;
}
StringBuilder sb = new();
await foreach (var result in ChatAsyncInternal(prompt, inferenceParams, cancellationToken))
{
yield return result;
sb.Append(result);
}
History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.Assistant, sb.ToString()).Messages);
string assistantMessage = sb.ToString();
// Remove end tokens from the assistant message
// if defined in inferenceParams.AntiPrompts.
// We only want the response that was generated and not tokens
// that are delimiting the beginning or end of the response.
if (inferenceParams?.AntiPrompts != null)
{
foreach (var stopToken in inferenceParams.AntiPrompts)
{
assistantMessage = assistantMessage.Replace(stopToken, "");
}
}
History.Messages.Add(new ChatHistory.Message(AuthorRole.Assistant, assistantMessage));
}
/// <summary>
/// Get the response from the LLama model with chat histories.
/// Generates a response for a given chat history. This method does not manage history state for the user.
/// If you want to e.g. truncate the history of a session to fit into the model's context window,
/// use this method and pass the truncated history to it. If you don't need this control, use the other
/// overload of this method that accepts a user prompt instead.
/// </summary>
/// <param name="history"></param>
/// <param name="inferenceParams"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <returns>Returns generated text of the assistant message.</returns>
public async IAsyncEnumerable<string> ChatAsync(ChatHistory history, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
var prompt = HistoryTransform.HistoryToText(history);
History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.User, prompt).Messages);
StringBuilder sb = new();
if (history.Messages.Count == 0)
{
throw new ArgumentException("History must contain at least one message.");
}
string prompt;
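// Same idea as the prompt-based overload: a first (prompt) run sends the whole session history as text,
// otherwise only the latest message content is sent, since a stateful executor already holds the earlier turns.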
if (_executor is InteractiveExecutor executor)
{
InteractiveExecutorState state = (InteractiveExecutorState)executor.GetStateData();
prompt = state.IsPromptRun
? HistoryTransform.HistoryToText(History)
: history.Messages.Last().Content;
}
else
{
prompt = history.Messages.Last().Content;
}
await foreach (var result in ChatAsyncInternal(prompt, inferenceParams, cancellationToken))
{
yield return result;
sb.Append(result);
}
History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.Assistant, sb.ToString()).Messages);
}
private async IAsyncEnumerable<string> ChatAsyncInternal(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)

View File

@@ -54,6 +54,12 @@ For [microsoft semantic-kernel](https://github.com/microsoft/semantic-kernel) in
LLamaSharp.semantic-kernel
```
For [microsoft kernel-memory](https://github.com/microsoft/kernel-memory) integration, please search and install the following package (currently kernel-memory only supports net6.0):
```
LLamaSharp.kernel-memory
```
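If you prefer adding the reference directly to a project file, the equivalent entries look like this (the version shown matches the project files in this commit; pick the latest published one):
```
<PackageReference Include="LLamaSharp.semantic-kernel" Version="0.7.1" />
<PackageReference Include="LLamaSharp.kernel-memory" Version="0.7.1" />
```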
### Tips for choosing a version
In general, there may be some breaking changes between two minor releases, for example 0.5.1 and 0.6.0. In contrast, we don't introduce API breaking changes in patch releases. Therefore it's recommended to keep to the highest patch version of a minor release, for example 0.5.6 instead of 0.5.3.
@@ -196,7 +202,7 @@ Another choice is generate gguf format file yourself with a pytorch weight (or a
🔳 Fine-tune
⚠️ Local document search (enabled by kernel-memory now)
✅ Local document search (enabled by kernel-memory now)
🔳 MAUI Integration