Merge pull request #295 from xbotter/patch/examples

Update examples
Martin Evans 2023-11-15 19:17:38 +00:00 committed by GitHub
commit 0bfc1cbecb
20 changed files with 72 additions and 131 deletions


@@ -4,7 +4,7 @@ using System.Text;
 using LLama.Common;
 using LLama.Native;
-namespace LLama.Examples.NewVersion;
+namespace LLama.Examples.Examples;
 /// <summary>
 /// This demonstrates generating multiple replies to the same prompt, with a shared cache


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class ChatSessionStripRoleName
     {


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class ChatSessionWithRoleName
     {


@@ -1,4 +1,4 @@
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     using LLama.Common;
     using System;


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class GetEmbeddings
     {


@@ -1,7 +1,7 @@
 using LLama.Common;
 using LLama.Grammars;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class GrammarJsonResponse
     {


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class InstructModeExecute
     {


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class InteractiveModeExecute
     {


@@ -7,7 +7,7 @@ using LLamaSharp.KernelMemory;
 using Microsoft.KernelMemory;
 using Microsoft.KernelMemory.Handlers;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class KernelMemory
     {


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class SaveAndLoadSession
     {


@@ -1,6 +1,6 @@
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class LoadAndSaveState
     {


@@ -1,4 +1,4 @@
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class QuantizeModel
     {


@@ -0,0 +1,53 @@
+using Spectre.Console;
+
+namespace LLama.Examples.Examples;
+
+public class Runner
+{
+    static Dictionary<string, Func<Task>> Examples = new()
+    {
+        { "Run a chat session without stripping the role names.", () => ChatSessionWithRoleName.Run() },
+        { "Run a chat session with the role names stripped.", () => ChatSessionStripRoleName.Run() },
+        { "Interactive mode chat by using executor.", () => InteractiveModeExecute.Run() },
+        { "Instruct mode chat by using executor.", () => InstructModeExecute.Run() },
+        { "Stateless mode chat by using executor.", () => StatelessModeExecute.Run() },
+        { "Load and save chat session.", () => SaveAndLoadSession.Run() },
+        { "Load and save state of model and executor.", () => LoadAndSaveState.Run() },
+        { "Get embeddings from LLama model.", () => Task.Run(GetEmbeddings.Run) },
+        { "Quantize the model.", () => Task.Run(QuantizeModel.Run) },
+        { "Automatic conversation.", () => TalkToYourself.Run() },
+        { "Constrain response to json format using grammar.", () => GrammarJsonResponse.Run() },
+        { "Semantic Kernel Prompt.", () => SemanticKernelPrompt.Run() },
+        { "Semantic Kernel Chat.", () => SemanticKernelChat.Run() },
+        { "Semantic Kernel Memory.", () => SemanticKernelMemory.Run() },
+        { "Coding Assistant.", () => CodingAssistant.Run() },
+        { "Batch Decoding.", () => BatchedDecoding.Run() },
+        { "SK Kernel Memory.", () => KernelMemory.Run() },
+        { "Exit", () => Task.CompletedTask }
+    };
+
+    public static async Task Run()
+    {
+        AnsiConsole.Write(new Rule("LLamaSharp Examples"));
+
+        while (true)
+        {
+            var choice = AnsiConsole.Prompt(
+                new SelectionPrompt<string>()
+                    .Title("Please choose[green] an example[/] to run: ")
+                    .AddChoices(Examples.Keys));
+
+            if (Examples.TryGetValue(choice, out var example))
+            {
+                if (choice == "Exit")
+                {
+                    break;
+                }
+                AnsiConsole.Write(new Rule(choice));
+                await example();
+            }
+            AnsiConsole.Clear();
+        }
+    }
+}
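Note: the new Runner replaces the old numbered if/else menu with a dictionary mapping each description to a Func<Task>, and hands input handling to Spectre.Console's SelectionPrompt, so registering a new example becomes a one-line change. A minimal, self-contained sketch of the same pattern (placeholder entries only, assuming nothing beyond the Spectre.Console NuGet package and .NET implicit usings):

using Spectre.Console;

// Placeholder menu entries -- not the real LLamaSharp examples.
var examples = new Dictionary<string, Func<Task>>
{
    { "Say hello.", () => { Console.WriteLine("Hello!"); return Task.CompletedTask; } },
    { "Wait one second.", () => Task.Delay(1000) },
    { "Exit", () => Task.CompletedTask }
};

while (true)
{
    // SelectionPrompt renders an arrow-key menu and returns the selected key.
    var choice = AnsiConsole.Prompt(
        new SelectionPrompt<string>()
            .Title("Please choose an example to run:")
            .AddChoices(examples.Keys));

    if (choice == "Exit")
        break;

    AnsiConsole.Write(new Rule(choice)); // horizontal rule titled with the chosen entry
    await examples[choice]();
}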


@@ -3,7 +3,7 @@ using LLama.Common;
 using Microsoft.SemanticKernel.AI.ChatCompletion;
 using LLamaSharp.SemanticKernel.ChatCompletion;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class SemanticKernelChat
     {


@@ -5,7 +5,7 @@ using LLamaSharp.SemanticKernel.TextEmbedding;
 using Microsoft.SemanticKernel.AI.Embeddings;
 using Microsoft.SemanticKernel.Plugins.Memory;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class SemanticKernelMemory
     {


@@ -5,7 +5,7 @@ using Microsoft.SemanticKernel;
 using Microsoft.SemanticKernel.AI.TextCompletion;
 using LLamaSharp.SemanticKernel.TextCompletion;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class SemanticKernelPrompt
     {


@@ -1,7 +1,7 @@
 using LLama.Common;
 using LLama.Examples.Extensions;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class StatelessModeExecute
     {


@@ -3,7 +3,7 @@ using System.Text;
 using LLama.Abstractions;
 using LLama.Common;
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
 {
     public class TalkToYourself
     {


@@ -1,112 +0,0 @@
-namespace LLama.Examples.NewVersion
-{
-    public class NewVersionTestRunner
-    {
-        public static async Task Run()
-        {
-            Console.WriteLine("================LLamaSharp Examples (New Version)==================\n");
-            Console.WriteLine("Please input a number to choose an example to run:");
-            Console.WriteLine("0: Run a chat session without stripping the role names.");
-            Console.WriteLine("1: Run a chat session with the role names stripped.");
-            Console.WriteLine("2: Interactive mode chat by using executor.");
-            Console.WriteLine("3: Instruct mode chat by using executor.");
-            Console.WriteLine("4: Stateless mode chat by using executor.");
-            Console.WriteLine("5: Load and save chat session.");
-            Console.WriteLine("6: Load and save state of model and executor.");
-            Console.WriteLine("7: Get embeddings from LLama model.");
-            Console.WriteLine("8: Quantize the model.");
-            Console.WriteLine("9: Automatic conversation.");
-            Console.WriteLine("10: Constrain response to json format using grammar.");
-            Console.WriteLine("11: Semantic Kernel Prompt.");
-            Console.WriteLine("12: Semantic Kernel Chat.");
-            Console.WriteLine("13: Semantic Kernel Memory.");
-            Console.WriteLine("14: Coding Assistant.");
-            Console.WriteLine("15: Batch Decoding.");
-            Console.WriteLine("16: SK Kernel Memory.");
-            while (true)
-            {
-                Console.Write("\nYour choice: ");
-                int choice = int.Parse(Console.ReadLine());
-                if (choice == 0)
-                {
-                    await ChatSessionWithRoleName.Run();
-                }
-                else if (choice == 1)
-                {
-                    await ChatSessionStripRoleName.Run();
-                }
-                else if (choice == 2)
-                {
-                    await InteractiveModeExecute.Run();
-                }
-                else if (choice == 3)
-                {
-                    await InstructModeExecute.Run();
-                }
-                else if (choice == 4)
-                {
-                    await StatelessModeExecute.Run();
-                }
-                else if (choice == 5)
-                {
-                    await SaveAndLoadSession.Run();
-                }
-                else if (choice == 6)
-                {
-                    await LoadAndSaveState.Run();
-                }
-                else if (choice == 7)
-                {
-                    GetEmbeddings.Run();
-                }
-                else if (choice == 8)
-                {
-                    QuantizeModel.Run();
-                }
-                else if (choice == 9)
-                {
-                    await TalkToYourself.Run();
-                }
-                else if (choice == 10)
-                {
-                    await GrammarJsonResponse.Run();
-                }
-                else if (choice == 11)
-                {
-                    await SemanticKernelPrompt.Run();
-                }
-                else if (choice == 12)
-                {
-                    await SemanticKernelChat.Run();
-                }
-                else if (choice == 13)
-                {
-                    await SemanticKernelMemory.Run();
-                }
-                else if (choice == 14)
-                {
-                    await CodingAssistant.Run();
-                }
-                else if (choice == 15)
-                {
-                    await BatchedDecoding.Run();
-                }
-                else if (choice == 16)
-                {
-                    await KernelMemory.Run();
-                }
-                else
-                {
-                    Console.WriteLine("Cannot parse your choice. Please select again.");
-                    continue;
-                }
-                break;
-            }
-        }
-    }
-}
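Note: the deleted runner parsed its menu choice with int.Parse(Console.ReadLine()), which throws a FormatException on non-numeric input instead of ever reaching the "Cannot parse your choice" branch (that branch only caught out-of-range numbers). The SelectionPrompt above sidesteps input parsing entirely; within the old design, an int.TryParse loop would also have fixed it. A sketch of that fix, not code from this PR:

while (true)
{
    Console.Write("\nYour choice: ");

    // TryParse returns false on bad input instead of throwing like int.Parse.
    if (int.TryParse(Console.ReadLine(), out var choice) && choice >= 0 && choice <= 16)
    {
        Console.WriteLine($"Running example {choice}..."); // dispatch to the chosen example here
        break;
    }

    Console.WriteLine("Cannot parse your choice. Please select again.");
}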


@@ -1,4 +1,4 @@
-using LLama.Examples.NewVersion;
+using LLama.Examples.Examples;
 using LLama.Native;
 
 Console.WriteLine("======================================================================================================");
@@ -12,4 +12,4 @@ NativeLibraryConfig.Instance.WithCuda().WithLogs();
 NativeApi.llama_empty_call();
 Console.WriteLine();
 
-await NewVersionTestRunner.Run();
+await Runner.Run();
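With this change the examples project drops straight into the Spectre.Console menu: Program.cs keeps its native-library setup (NativeLibraryConfig.Instance.WithCuda().WithLogs(), followed by an early NativeApi.llama_empty_call() that appears to force the backend to load up front) and then awaits Runner.Run() in place of the old NewVersionTestRunner.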