Bump example, readme
This commit is contained in:
parent bea1ca9f8d
commit 94a395240a
@@ -27,8 +27,7 @@ namespace LLama.Examples.NewVersion
var parameters = new ModelParams(modelPath)
{
    Seed = seed,
    EmbeddingMode = true,
    GpuLayerCount = 50,
    EmbeddingMode = true
};

using var model = LLamaWeights.LoadFromFile(parameters);
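For orientation, here is a minimal sketch of how parameters like these are typically used to produce embeddings. The using directives, the placeholder model path, the fixed seed, and the GetEmbeddings call are assumptions based on LLamaSharp releases from this period; they are not part of the diff.

```csharp
using System;
using LLama;
using LLama.Common;

// Sketch only: the model path is a placeholder; point it at a real local model file.
var modelPath = "path/to/your/model.bin";

var parameters = new ModelParams(modelPath)
{
    Seed = 1337,            // any fixed seed; the example in the diff uses a `seed` variable
    EmbeddingMode = true,   // expose embeddings instead of plain text generation
    GpuLayerCount = 50      // offload layers to the GPU when one is available
};

using var model = LLamaWeights.LoadFromFile(parameters);
var embedder = new LLamaEmbedder(model, parameters);

// Assumed API for this period of LLamaSharp: GetEmbeddings returns a float[] vector.
float[] vector = embedder.GetEmbeddings("Hello from LLamaSharp!");
Console.WriteLine($"Embedding dimension: {vector.Length}");
```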
@@ -6,6 +6,7 @@ For reference on how to implement it, view the following examples:

- [SemanticKernelChat](../LLama.Examples/NewVersion/SemanticKernelChat.cs)
- [SemanticKernelPrompt](../LLama.Examples/NewVersion/SemanticKernelPrompt.cs)
- [SemanticKernelMemory](../LLama.Examples/NewVersion/SemanticKernelMemory.cs)

## ITextCompletion
```csharp
@@ -24,3 +25,14 @@ using var context = model.CreateContext(parameters);
var ex = new InteractiveExecutor(context);
var chatGPT = new LLamaSharpChatCompletion(ex);
```
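The snippet above only constructs the completion service. A possible continuation, assuming the pre-1.0 Semantic Kernel chat surface (CreateNewChat and the GenerateMessageAsync extension) that this README targets, might look like the following; the prompt text is made up for illustration.

```csharp
// Sketch only: continues from the chatGPT instance created above.
var chatHistory = chatGPT.CreateNewChat("You are a helpful assistant.");
chatHistory.AddUserMessage("Write a haiku about llamas.");

// GenerateMessageAsync is assumed from Microsoft.SemanticKernel.AI.ChatCompletion (0.x).
string reply = await chatGPT.GenerateMessageAsync(chatHistory);
Console.WriteLine(reply);
```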

## ITextEmbeddingGeneration
```csharp
using var model = LLamaWeights.LoadFromFile(parameters);
var embedding = new LLamaEmbedder(model, parameters);
var kernelWithCustomDb = Kernel.Builder
    .WithLoggerFactory(ConsoleLogger.LoggerFactory)
    .WithAIService<ITextEmbeddingGeneration>("local-llama-embed", new LLamaSharpEmbeddingGeneration(embedding), true)
    .WithMemoryStorage(new VolatileMemoryStore())
    .Build();
```
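The builder above only registers the local embedding service and a volatile store. A possible continuation, assuming the pre-1.0 Semantic Kernel memory API (kernel.Memory.SaveInformationAsync and SearchAsync), is sketched below; the collection name and sample texts are made up for illustration.

```csharp
// Sketch only: continues from the kernelWithCustomDb instance built above.
const string collection = "facts";

// Store a piece of text; the local llama embedder computes its vector.
await kernelWithCustomDb.Memory.SaveInformationAsync(collection,
    text: "LLamaSharp runs llama.cpp models from .NET.",
    id: "fact-1");

// Query the volatile store with a natural-language question.
await foreach (var result in kernelWithCustomDb.Memory.SearchAsync(collection, "What is LLamaSharp?", limit: 1))
{
    Console.WriteLine($"{result.Relevance:F2}  {result.Metadata.Text}");
}
```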