fix: typos.

commit 495177fd0f
parent de31a06a4a
Author: Rinne
Date:   2024-04-29 18:19:20 +08:00

32 changed files with 43 additions and 43 deletions


@@ -80,7 +80,7 @@ public class BatchedExecutorSaveAndLoad
// Continue generating text
await GenerateTokens(executor, conversation, sampler, decoder, n_len);
-// Display final ouput
+// Display final output
AnsiConsole.MarkupLine($"[red]{prompt}{decoder.Read()}[/]");
}


@@ -95,7 +95,7 @@ namespace LLama.Examples.Examples
Console.WriteLine();
-// Initilize Images in executor
+// Initialize Images in executor
//
foreach (var image in imagePaths)
{


@@ -124,7 +124,7 @@ In case of inaudible sentences that might be, assume they're saying 'stop'.
int totalNonBlankClips; // ..but for example's sake they work on a
int nonIdleTime; // ..clip-based quant-length (1 = clipLength).
// Default detection settings: A speech of 750ms, followed by pause of 500ms. (2x250ms)
-public (int minBlanksPerSeperation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);
+public (int minBlanksPerSeparation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);
public HashSet<ISpeechListener> ServiceUsers = [];
@@ -156,7 +156,7 @@ In case of inaudible sentences that might be, assume they're saying 'stop'.
// Compare the volume with the threshold and act accordingly. Once an interesting and 'full' set of clips pops up, serve it.
if (maxVolume >= voiceDetectionThreshold) { currentBlankClips = 0; totalNonBlankClips++; nonIdleTime++; }
-else if (++currentBlankClips < detectionSettings.minBlanksPerSeperation) { nonIdleTime++; }
+else if (++currentBlankClips < detectionSettings.minBlanksPerSeparation) { nonIdleTime++; }
else
{
if (totalNonBlankClips >= detectionSettings.minNonBlanksForValidMessages) { SendTranscription(); }


@@ -1,7 +1,7 @@
namespace LLama.Web.Async
{
/// <summary>
-/// Create an Async locking using statment
+/// Create an Async locking using statement
/// </summary>
public sealed class AsyncLock
{


@@ -34,14 +34,14 @@ namespace LLama.Web
private static List<string> CombineCSV(List<string> list, string csv)
{
var results = list is null || list.Count == 0
-? CommaSeperatedToList(csv)
-: CommaSeperatedToList(csv).Concat(list);
+? CommaSeparatedToList(csv)
+: CommaSeparatedToList(csv).Concat(list);
return results
.Distinct()
.ToList();
}
-private static List<string> CommaSeperatedToList(string value)
+private static List<string> CommaSeparatedToList(string value)
{
if (string.IsNullOrEmpty(value))
return new List<string>();


@@ -30,7 +30,7 @@ namespace LLama.Web.Hubs
{
_logger.Log(LogLevel.Information, "[OnDisconnectedAsync], Id: {0}", Context.ConnectionId);
-// Remove connections session on dissconnect
+// Remove connections session on disconnect
await _modelSessionService.CloseAsync(Context.ConnectionId);
await base.OnDisconnectedAsync(exception);
}


@@ -1,8 +1,8 @@
## LLama.Web - Basic ASP.NET Core examples of LLamaSharp in action
-LLama.Web has no heavy dependencies and no extra frameworks ove bootstrap and jquery to keep the examples clean and easy to copy over to your own project
+LLama.Web has no heavy dependencies and no extra frameworks over bootstrap and jquery to keep the examples clean and easy to copy over to your own project
## Websockets
-Using signalr websockets simplifys the streaming of responses and model per connection management
+Using signalr websockets simplifies the streaming of responses and model per connection management
@@ -23,7 +23,7 @@ Example:
{
"Name": "Alpaca",
"Path": "D:\\Repositories\\AI\\Prompts\\alpaca.txt",
"Prompt": "Alternativly to can set a prompt text directly and omit the Path"
"Prompt": "Alternatively to can set a prompt text directly and omit the Path"
"AntiPrompt": [
"User:"
],


@@ -8,7 +8,7 @@ namespace LLama.Web.Services
{
/// <summary>
-/// Sercive for handling Models,Weights & Contexts
+/// Service for handling Models,Weights & Contexts
/// </summary>
public class ModelService : IModelService
{


@@ -6,7 +6,7 @@ using LLama.Sampling;
namespace LLama.Abstractions
{
/// <summary>
-/// The paramters used for inference.
+/// The parameters used for inference.
/// </summary>
public interface IInferenceParams
{


@@ -20,7 +20,7 @@ namespace LLama.Abstractions
/// </summary>
public bool IsMultiModal { get; }
/// <summary>
-/// Muti-Modal Projections / Clip Model weights
+/// Multi-Modal Projections / Clip Model weights
/// </summary>
public LLavaWeights? ClipModel { get; }


@@ -232,7 +232,7 @@ namespace LLama.Abstractions
public sealed record MetadataOverride
{
/// <summary>
-/// Get the key being overriden by this override
+/// Get the key being overridden by this override
/// </summary>
public string Key { get; }


@@ -545,7 +545,7 @@ public class ChatSession
InferenceParams? inferenceParams = null,
[EnumeratorCancellation] CancellationToken cancellationToken = default)
{
-// Make sure the last message is an assistant message (reponse from the LLM).
+// Make sure the last message is an assistant message (response from the LLM).
ChatHistory.Message? lastAssistantMessage = History.Messages.LastOrDefault();
if (lastAssistantMessage is null


@@ -7,7 +7,7 @@ using LLama.Sampling;
namespace LLama.Common
{
/// <summary>
-/// The paramters used for inference.
+/// The parameters used for inference.
/// </summary>
public record InferenceParams
: IInferenceParams


@@ -6,7 +6,7 @@ using LLama.Native;
namespace LLama.Extensions
{
/// <summary>
-/// Extention methods to the IContextParams interface
+/// Extension methods to the IContextParams interface
/// </summary>
public static class IContextParamsExtensions
{


@@ -7,7 +7,7 @@ using LLama.Native;
namespace LLama.Extensions;
/// <summary>
-/// Extention methods to the IModelParams interface
+/// Extension methods to the IModelParams interface
/// </summary>
public static class IModelParamsExtensions
{


@@ -628,7 +628,7 @@ namespace LLama
}
/// <summary>
-/// Copy bytes to a desintation pointer.
+/// Copy bytes to a destination pointer.
/// </summary>
/// <param name="dst">Destination to write to</param>
/// <param name="length">Length of the destination buffer</param>


@@ -209,7 +209,7 @@ namespace LLama
/// <summary>
/// Try to reuse the matching prefix from the session file.
/// </summary>
-protected virtual void TryReuseMathingPrefix()
+protected virtual void TryReuseMatchingPrefix()
{
if (_n_session_consumed < _session_tokens.Count)
{


@@ -189,7 +189,7 @@ namespace LLama
HandleRunOutOfContext(inferenceParams.TokensKeep);
}
-TryReuseMathingPrefix();
+TryReuseMatchingPrefix();
var (result, _) = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
if (result != DecodeResult.Ok)
@@ -259,7 +259,7 @@ namespace LLama
return Task.CompletedTask;
}
/// <summary>
-/// The desciptor of the state of the instruct executor.
+/// The descriptor of the state of the instruct executor.
/// </summary>
public class InstructExecutorState : ExecutorBaseState
{


@@ -234,7 +234,7 @@ namespace LLama
HandleRunOutOfContext(inferenceParams.TokensKeep);
}
-TryReuseMathingPrefix();
+TryReuseMatchingPrefix();
// Changes to support Multi-Modal LLMs.
//


@@ -63,7 +63,7 @@ namespace LLama
/// <inheritdoc />
public async IAsyncEnumerable<string> InferAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
-// Ensure the context from last time is disposed (it always hould be)
+// Ensure the context from last time is disposed (it always should be)
if (!Context.NativeHandle.IsClosed)
Context.Dispose();


@@ -21,7 +21,7 @@ public static unsafe partial class NativeApi
/// <param name="ctx_clip">SafeHandle to the Clip Model</param>
/// <param name="n_threads">Number of threads</param>
/// <param name="image_bytes">Binary image in jpeg format</param>
/// <param name="image_bytes_length">Bytes lenght of the image</param>
/// <param name="image_bytes_length">Bytes length of the image</param>
/// <returns>SafeHandle to the Embeddings</returns>
[DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_bytes",
CallingConvention = CallingConvention.Cdecl)]
@@ -35,7 +35,7 @@ public static unsafe partial class NativeApi
/// <param name="ctx_clip">SafeHandle to the Clip Model</param>
/// <param name="n_threads">Number of threads</param>
/// <param name="image_path">Image filename (jpeg) to generate embeddings</param>
-/// <returns>SafeHandel to the embeddings</returns>
+/// <returns>SafeHandle to the embeddings</returns>
[DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_filename", CallingConvention = CallingConvention.Cdecl)]
public static extern
SafeLlavaImageEmbedHandle llava_image_embed_make_with_filename(SafeLlavaModelHandle ctx_clip, int n_threads,


@@ -34,7 +34,7 @@ namespace LLama.Native
"3. One of the dependency of the native library is missed. Please use `ldd` on linux, `dumpbin` on windows and `otool`" +
"to check if all the dependency of the native library is satisfied. Generally you could find the libraries under your output folder.\n" +
"4. Try to compile llama.cpp yourself to generate a libllama library, then use `LLama.Native.NativeLibraryConfig.WithLibrary` " +
"to specify it at the very beginning of your code. For more informations about compilation, please refer to LLamaSharp repo on github.\n");
"to specify it at the very beginning of your code. For more information about compilation, please refer to LLamaSharp repo on github.\n");
}
// Now that the "loaded" flag is set configure logging in llama.cpp


@@ -101,7 +101,7 @@ namespace LLama.Native
}
/// <summary>
-/// Add self-defined search directories. Note that the file stucture of the added
+/// Add self-defined search directories. Note that the file structure of the added
/// directories must be the same as the default directory. Besides, the directory
/// won't be used recursively.
/// </summary>
@@ -116,7 +116,7 @@ namespace LLama.Native
}
/// <summary>
-/// Add self-defined search directories. Note that the file stucture of the added
+/// Add self-defined search directories. Note that the file structure of the added
/// directories must be the same as the default directory. Besides, the directory
/// won't be used recursively.
/// </summary>


@@ -175,7 +175,7 @@ For more examples, please refer to [LLamaSharp.Examples](./LLama.Examples).
#### Why GPU is not used when I have installed CUDA
1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
-2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
+2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
#### Why the inference is slow


@@ -98,7 +98,7 @@ namespace LLama.Examples.Examples
Console.WriteLine();
-// Initilize Images in executor
+// Initialize Images in executor
//
foreach (var image in imagePaths)
{


@@ -1,11 +1,11 @@
-# Frequently asked qustions
+# Frequently asked questions
Sometimes, your application with LLM and LLamaSharp may have unexpected behaviours. Here are some frequently asked questions, which may help you to deal with your problem.
## Why GPU is not used when I have installed CUDA
1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
-2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
+2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
## Why the inference is slow


@@ -169,7 +169,7 @@ do
Console.WriteLine();
-// Initilize Images in executor
+// Initialize Images in executor
//
ex.ImagePaths = imagePaths.ToList();
}


@@ -23,7 +23,7 @@ public interface ILLamaExecutor
/// </summary>
public bool IsMultiModal { get; }
/// <summary>
-/// Muti-Modal Projections / Clip Model weights
+/// Multi-Modal Projections / Clip Model weights
/// </summary>
public LLavaWeights? ClipModel { get; }
@@ -110,7 +110,7 @@ At this time, by repeating the same mode of `Q: xxx? A: xxx.`, LLM outputs the a
## BatchedExecutor
-Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and geneate outputs for them at the same time. Here is an example to use it.
+Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and generate outputs for them at the same time. Here is an example to use it.
```cs
using LLama.Batched;
@@ -249,7 +249,7 @@ Here is the parameters for LLamaSharp executors.
```cs
/// <summary>
-/// The paramters used for inference.
+/// The parameters used for inference.
/// </summary>
public record InferenceParams
: IInferenceParams


@@ -8,7 +8,7 @@ As indicated in [Architecture](../Architecture.md), LLamaSharp uses the native l
Before introducing the way to customize native library loading, please follow the tips below to see if you need to compile the native library yourself, rather than use the published backend packages, which contain native library files for multiple targets.
1. Your device/environment has not been supported by any published backend packages. For example, vulkan has not been supported yet. In this case, it will mean a lot to open an issue to tell us you are using it. Since our support for new backend will have a delay, you could compile yourself before that.
-2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library youself, following the [instructions here](../ContributingGuide.md).
+2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library yourself, following the [instructions here](../ContributingGuide.md).
3. You want to debug the c++ code.


@@ -11,7 +11,7 @@ If you are new to LLM, here're some tips for you to help you to get start with `
1. The main ability of LLamaSharp is to provide an efficient way to run inference of LLM on your device (and fine-tune model in the future). The model weights, however, need to be downloaded from other resources such as [huggingface](https://huggingface.co).
2. To gain high performance, LLamaSharp interacts with a native library compiled from c++, which is called `backend`. We provide backend packages for Windows, Linux and MAC with CPU, Cuda, Metal and OpenCL. You **don't** need to handle anything about c++ but just install the backend packages. If no published backend match your device, please open an issue to let us know. If compiling c++ code is not difficult for you, you could also follow [this guide]() to compile a backend and run LLamaSharp with it.
3. `LLaMA` originally refers to the weights released by Meta (Facebook Research). After that, many models are fine-tuned based on it, such as `Vicuna`, `GPT4All`, and `Pyglion`. There are two popular file format of these model now, which are PyTorch format (.pth) and Huggingface format (.bin). LLamaSharp uses `GGUF` format file, which could be converted from these two formats. There are two options for you to get GGUF format file. a) Search model name + 'gguf' in [Huggingface](https://huggingface.co), you will find lots of model files that have already been converted to GGUF format. Please take care of the publishing time of them because some old ones could only work with old version of LLamaSharp. b) Convert PyTorch or Huggingface format to GGUF format yourself. Please follow the instructions of [this part of llama.cpp readme](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#prepare-and-quantize) to convert them with the python scripts.
-4. LLamaSharp supports multi-modal, which means that the model could take both text and image as input. Note that there are two model files requied for using multi-modal (LLaVA), which are main model and mm-proj model. Here is a huggingface repo which shows that: [link](https://huggingface.co/ShadowBeast/llava-v1.6-mistral-7b-Q5_K_S-GGUF/tree/main).
+4. LLamaSharp supports multi-modal, which means that the model could take both text and image as input. Note that there are two model files required for using multi-modal (LLaVA), which are main model and mm-proj model. Here is a huggingface repo which shows that: [link](https://huggingface.co/ShadowBeast/llava-v1.6-mistral-7b-Q5_K_S-GGUF/tree/main).


@@ -2,7 +2,7 @@
Namespace: LLama.Abstractions
-The paramters used for inference.
+The parameters used for inference.
```csharp
public interface IInferenceParams


@@ -2,7 +2,7 @@
Namespace: LLama.Common
-The paramters used for inference.
+The parameters used for inference.
```csharp
public class InferenceParams : LLama.Abstractions.IInferenceParams, System.IEquatable`1[[LLama.Common.InferenceParams, LLamaSharp, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null]]