Merge branch 'master' of https://github.com/SciSharp/LLamaSharp into upstream_master

This commit is contained in:
sa_ddam213 2023-08-07 03:34:37 +12:00
commit e02d0c3617
13 changed files with 25 additions and 14 deletions

View File

@@ -12,7 +12,7 @@ jobs:
strategy:
fail-fast: false
matrix:
-build: [linux-debug, linux-release, macos-debug, macos-release, windows-debug, windows-release]
+build: [linux-debug, linux-release, windows-debug, windows-release]
include:
- build: linux-debug
os: ubuntu-latest
@@ -20,12 +20,12 @@ jobs:
- build: linux-release
os: ubuntu-latest
config: release
-- build: macos-debug
-os: macos-latest
-config: debug
-- build: macos-release
-os: macos-latest
-config: release
+# - build: macos-debug
+# os: macos-latest
+# config: debug
+# - build: macos-release
+# os: macos-latest
+# config: release
- build: windows-debug
os: windows-2019
config: debug

View File

@@ -30,8 +30,11 @@ namespace LLama.Common
/// <param name="data"></param>
public FixedSizeQueue(int size, IEnumerable<T> data)
{
-#if NETCOREAPP3_0_OR_GREATER
// Try an early check on the amount of data supplied (if possible)
+#if NETSTANDARD2_0
+var dataCount = data.Count();
+if (dataCount > size)
+throw new ArgumentException($"The max size set for the queue is {size}, but got {dataCount} initial values.");
+#else
if (data.TryGetNonEnumeratedCount(out var count) && count > size)
throw new ArgumentException($"The max size set for the queue is {size}, but got {count} initial values.");
#endif
@@ -42,9 +45,12 @@ namespace LLama.Common
// Now check if that list is a valid size
if (_storage.Count > _maxSize)
throw new ArgumentException($"The max size set for the quene is {size}, but got {_storage.Count} initial values.");
#if NETSTANDARD2_0
throw new ArgumentException($"The max size set for the quene is {size}, but got {dataCount} initial values.");
#else
throw new ArgumentException($"The max size set for the quene is {size}, but got {count} initial values.");
#endif
}
/// <summary>
/// Replace every item in the queue with the given value
/// </summary>
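For context on the change above: `Enumerable.TryGetNonEnumeratedCount` (available from .NET 6) reads a count without iterating the sequence, while the netstandard2.0 fallback must call `Count()`, so the result is cached in `dataCount` to avoid enumerating the source twice. A minimal standalone sketch of the same pattern — the `EnsureFits` helper name is illustrative, not part of LLamaSharp:

```cs
using System;
using System.Collections.Generic;
using System.Linq;

static class QueueGuards
{
    // Hypothetical helper mirroring the constructor's early size check.
    public static void EnsureFits<T>(IEnumerable<T> data, int size)
    {
#if NETSTANDARD2_0
        // netstandard2.0 has no TryGetNonEnumeratedCount, so Count() is
        // called once and cached to avoid enumerating the sequence twice.
        var dataCount = data.Count();
        if (dataCount > size)
            throw new ArgumentException($"The max size set for the queue is {size}, but got {dataCount} initial values.");
#else
        // From .NET 6: reads the count without enumerating when the
        // source is an array, list, or other sized collection.
        if (data.TryGetNonEnumeratedCount(out var count) && count > size)
            throw new ArgumentException($"The max size set for the queue is {size}, but got {count} initial values.");
#endif
    }
}
```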

View File

@@ -31,6 +31,10 @@
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Link>libllama.dylib</Link>
</None>
<None Include="$(MSBuildThisFileDirectory)runtimes/libllama-metal.dylib">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Link>libllama-metal.dylib</Link>
</None>
<None Include="$(MSBuildThisFileDirectory)runtimes/ggml-metal.metal">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Link>ggml-metal.metal</Link>
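The entries above make the consuming project copy the Metal artifacts (`libllama-metal.dylib` and the `ggml-metal.metal` shader source) into its output directory via `CopyToOutputDirectory`. If Metal inference fails on macOS, one quick check is whether those files actually landed next to the application binaries; a small illustrative snippet, with file names taken from the `<Link>` values above:

```cs
using System;
using System.IO;

// Verify the Metal runtime files declared in the .targets entries above
// were copied next to the application's output. Purely diagnostic.
var baseDir = AppContext.BaseDirectory;
foreach (var file in new[] { "libllama-metal.dylib", "ggml-metal.metal" })
{
    var path = Path.Combine(baseDir, file);
    Console.WriteLine($"{file}: {(File.Exists(path) ? "found" : "MISSING")} at {path}");
}
```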

View File

@@ -21,7 +21,8 @@ namespace LLama.Native
"1. No LLamaSharp backend was installed. Please search LLamaSharp.Backend and install one of them. \n" +
"2. You are using a device with only CPU but installed cuda backend. Please install cpu backend instead. \n" +
"3. The backend is not compatible with your system cuda environment. Please check and fix it. If the environment is " +
"expected not to be changed, then consider build llama.cpp from source or submit an issue to LLamaSharp.");
"expected not to be changed, then consider build llama.cpp from source or submit an issue to LLamaSharp.\n" +
"4. One of the dependency of the native library is missed.\n");
}
NativeApi.llama_backend_init(false);
}
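The new fourth hint covers the case where `libllama` itself is found but one of its own native dependencies (for example, the CUDA runtime when using the cuda backend) cannot be resolved. One hedged way to probe this on .NET Core 3.0+ is `NativeLibrary.TryLoad`; the library name below is an assumption, since the packaged file name varies per platform and backend package:

```cs
using System;
using System.Runtime.InteropServices;

// Probe whether the native backend (and, transitively, its own
// dependencies such as the CUDA runtime) can actually be loaded.
// "libllama" is an assumed name; the packaged file differs per OS.
if (NativeLibrary.TryLoad("libllama", out var handle))
{
    Console.WriteLine("Native backend loaded successfully.");
    NativeLibrary.Free(handle);
}
else
{
    Console.WriteLine("Could not load libllama: check that a LLamaSharp.Backend " +
                      "package is installed and its native dependencies are present.");
}
```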

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
LLama/runtimes/libllama.dylib Executable file → Normal file

Binary file not shown.

Binary file not shown.

View File

@@ -49,7 +49,7 @@ Here's the mapping of them and corresponding model samples provided by `LLamaSha
| v0.2.2 | v0.2.2, v0.2.3 | [WizardLM](https://huggingface.co/TheBloke/wizardLM-7B-GGML/tree/previous_llama_ggmlv2), [Vicuna (filenames without "old")](https://huggingface.co/eachadea/ggml-vicuna-13b-1.1/tree/main) | 63d2046 |
| v0.3.0, v0.3.1 | v0.3.0, v0.4.0 | [LLamaSharpSamples v0.3.0](https://huggingface.co/AsakusaRinne/LLamaSharpSamples/tree/v0.3.0), [WizardLM](https://huggingface.co/TheBloke/wizardLM-7B-GGML/tree/main) | 7e4ea5b |
| v0.4.1-preview (cpu only) | v0.4.1-preview | [Open llama 3b](https://huggingface.co/SlyEcho/open_llama_3b_ggml), [Open Buddy](https://huggingface.co/OpenBuddy/openbuddy-llama-ggml)| aacdbd4 |
-| v0.4.2-preview (cpu,cuda11) |v0.4.2-preview | [Llama2 7b](https://huggingface.co/TheBloke/llama-2-7B-Guanaco-QLoRA-GGML)| - |
+| v0.4.2-preview (cpu,cuda11) |v0.4.2-preview | [Llama2 7b](https://huggingface.co/TheBloke/llama-2-7B-Guanaco-QLoRA-GGML)| 332311234a0aa2974b2450710e22e09d90dd6b0b |
Many hands make light work. If you have found any other model resource that works for a version, we'd appreciate you opening a PR about it! 😊
@@ -148,7 +148,7 @@ Since we are short of hands, if you're familiar with ASP.NET core, we'll appr
⚠️ BotSharp Integration
-⚠️ ASP.NET core Integration
+ASP.NET core Integration
⚠️ Semantic-kernel Integration