diff --git a/LLama.Examples/NewVersion/CodingAssistant.cs b/LLama.Examples/NewVersion/CodingAssistant.cs
index 69e997d3..9108e01d 100644
--- a/LLama.Examples/NewVersion/CodingAssistant.cs
+++ b/LLama.Examples/NewVersion/CodingAssistant.cs
@@ -31,7 +31,7 @@
};
using var model = LLamaWeights.LoadFromFile(parameters);
using var context = model.CreateContext(parameters);
- var executor = new InstructExecutor(context, InstructionPrefix, InstructionSuffix);
+ var executor = new InstructExecutor(context, null!, InstructionPrefix, InstructionSuffix);
Console.ForegroundColor = ConsoleColor.Yellow;
Console.WriteLine("The executor has been enabled. In this example, the LLM will follow your instructions." +
diff --git a/LLama.Web/Models/LLamaModel.cs b/LLama.Web/Models/LLamaModel.cs
index 71bb290e..5aedc5f5 100644
--- a/LLama.Web/Models/LLamaModel.cs
+++ b/LLama.Web/Models/LLamaModel.cs
@@ -10,6 +10,7 @@ namespace LLama.Web.Models
/// </summary>
public class LLamaModel : IDisposable
{
+ private readonly ILogger _llamaLogger;
private readonly ModelOptions _config;
private readonly LLamaWeights _weights;
private readonly ConcurrentDictionary<string, LLamaContext> _contexts;
@@ -18,9 +19,10 @@ namespace LLama.Web.Models
/// Initializes a new instance of the <see cref="LLamaModel"/> class.
/// </summary>
/// <param name="modelParams">The model parameters.</param>
- public LLamaModel(ModelOptions modelParams)
+ public LLamaModel(ModelOptions modelParams, ILogger llamaLogger)
{
_config = modelParams;
+ _llamaLogger = llamaLogger;
_weights = LLamaWeights.LoadFromFile(modelParams);
_contexts = new ConcurrentDictionary<string, LLamaContext>();
}
@@ -56,7 +58,7 @@ namespace LLama.Web.Models
if (_config.MaxInstances > -1 && ContextCount >= _config.MaxInstances)
throw new Exception($"Maximum model instances reached");
- context = _weights.CreateContext(_config);
+ context = _weights.CreateContext(_config, _llamaLogger);
if (_contexts.TryAdd(contextName, context))
return Task.FromResult(context);
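LLamaModel now forwards the injected logger to every context it creates. A hedged construction sketch (the ModelOptions values are placeholders invented for the example, not from this diff):

```csharp
using Microsoft.Extensions.Logging.Abstractions;

// Illustrative only: option values are invented, and NullLogger stands in
// for whatever ILogger the host application provides.
var options = new ModelOptions { Name = "example", ModelPath = "model.gguf" };
using var model = new LLamaModel(options, NullLogger.Instance);
```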
diff --git a/LLama.Web/Services/ModelService.cs b/LLama.Web/Services/ModelService.cs
index 2a3d4788..dfb34bb6 100644
--- a/LLama.Web/Services/ModelService.cs
+++ b/LLama.Web/Services/ModelService.cs
@@ -11,6 +11,7 @@ namespace LLama.Web.Services
/// </summary>
public class ModelService : IModelService
{
+ private readonly ILogger _llamaLogger;
private readonly AsyncLock _modelLock;
private readonly AsyncLock _contextLock;
private readonly LLamaOptions _configuration;
@@ -22,8 +23,9 @@ namespace LLama.Web.Services
/// </summary>
/// <param name="llamaLogger">The logger.</param>
/// <param name="configuration">The options.</param>
- public ModelService(LLamaOptions configuration)
+ public ModelService(LLamaOptions configuration, ILogger llamaLogger)
{
+ _llamaLogger = llamaLogger;
_modelLock = new AsyncLock();
_contextLock = new AsyncLock();
_configuration = configuration;
@@ -52,7 +54,7 @@ namespace LLama.Web.Services
await UnloadModels();
- model = new LLamaModel(modelOptions);
+ model = new LLamaModel(modelOptions, _llamaLogger);
_modelInstances.TryAdd(modelOptions.Name, model);
return model;
}
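ASP.NET Core's default logging setup registers only ILogger<T>, so injecting the non-generic ILogger into ModelService requires an explicit registration. One possible wiring, sketched under that assumption (the category name "LLama" is invented):

```csharp
// Hypothetical Program.cs wiring: expose a non-generic ILogger so the
// ModelService(LLamaOptions, ILogger) constructor can be resolved.
builder.Services.AddSingleton<ILogger>(sp =>
    sp.GetRequiredService<ILoggerFactory>().CreateLogger("LLama"));
builder.Services.AddSingleton<IModelService, ModelService>();
```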
diff --git a/LLama/LLamaInstructExecutor.cs b/LLama/LLamaInstructExecutor.cs
index 6faa3db2..dab34106 100644
--- a/LLama/LLamaInstructExecutor.cs
+++ b/LLama/LLamaInstructExecutor.cs
@@ -9,6 +9,7 @@ using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using LLama.Extensions;
+using Microsoft.Extensions.Logging;
namespace LLama
{
@@ -27,10 +28,11 @@ namespace LLama
/// <summary>
/// </summary>
/// <param name="context"></param>
+ /// <param name="logger"></param>
/// <param name="instructionPrefix"></param>
/// <param name="instructionSuffix"></param>
- public InstructExecutor(LLamaContext context, string instructionPrefix = "\n\n### Instruction:\n\n",
- string instructionSuffix = "\n\n### Response:\n\n") : base(context)
+ public InstructExecutor(LLamaContext context, ILogger logger = null!, string instructionPrefix = "\n\n### Instruction:\n\n",
+ string instructionSuffix = "\n\n### Response:\n\n") : base(context, logger)
{
_inp_pfx = Context.Tokenize(instructionPrefix, true);
_inp_sfx = Context.Tokenize(instructionSuffix, false);
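Inserting logger between context and the prefix/suffix parameters is source-breaking for positional callers (hence the null! fix in CodingAssistant.cs above). Named arguments let old call sites skip the logger without the suppression; a sketch assuming the defaults shown in this hunk:

```csharp
// Hypothetical call site: omit the logger, override only the prompt markers.
var executor = new InstructExecutor(context,
    instructionPrefix: "\n\n### Instruction:\n\n",
    instructionSuffix: "\n\n### Response:\n\n");
```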
diff --git a/LLama/LLamaInteractExecutor.cs b/LLama/LLamaInteractExecutor.cs
index ab403212..0f374e09 100644
--- a/LLama/LLamaInteractExecutor.cs
+++ b/LLama/LLamaInteractExecutor.cs
@@ -9,6 +9,7 @@ using System.Text.Json;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
using LLama.Extensions;
+using Microsoft.Extensions.Logging;
namespace LLama
{
@@ -25,7 +26,8 @@ namespace LLama
/// <summary>
/// </summary>
/// <param name="context"></param>
- public InteractiveExecutor(LLamaContext context) : base(context)
+ /// <param name="logger"></param>
+ public InteractiveExecutor(LLamaContext context, ILogger logger = null!) : base(context, logger)
{
_llama_token_newline = NativeApi.llama_token_nl(Context.NativeHandle);
}
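Here the logger lands after the only existing parameter, so positional callers keep compiling unchanged. A usage sketch (loggerFactory is an assumed ILoggerFactory, not part of this diff):

```csharp
var plain  = new InteractiveExecutor(context);  // existing call sites unaffected
var logged = new InteractiveExecutor(context, loggerFactory.CreateLogger<InteractiveExecutor>());
```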
diff --git a/LLama/LLamaStatelessExecutor.cs b/LLama/LLamaStatelessExecutor.cs
index 3ff755a0..e5348bb4 100644
--- a/LLama/LLamaStatelessExecutor.cs
+++ b/LLama/LLamaStatelessExecutor.cs
@@ -7,6 +7,7 @@ using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using LLama.Extensions;
+using Microsoft.Extensions.Logging;
namespace LLama
{
@@ -19,6 +20,7 @@ namespace LLama
public class StatelessExecutor
: ILLamaExecutor
{
+ private readonly ILogger? _logger;
private readonly LLamaWeights _weights;
private readonly IModelParams _params;
@@ -32,8 +34,10 @@ namespace LLama
/// </summary>
/// <param name="weights"></param>
/// <param name="params"></param>
- public StatelessExecutor(LLamaWeights weights, IModelParams @params)
+ /// <param name="logger"></param>
+ public StatelessExecutor(LLamaWeights weights, IModelParams @params, ILogger logger = null!)
{
+ _logger = logger;
_weights = weights;
_params = @params;
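Note the asymmetry here: the parameter is declared ILogger logger = null! but stored in an ILogger? field, so omitting the argument simply leaves _logger null. A usage sketch under the same assumption of an ambient loggerFactory:

```csharp
var quiet   = new StatelessExecutor(weights, parameters);  // _logger stays null
var verbose = new StatelessExecutor(weights, parameters,
    loggerFactory.CreateLogger<StatelessExecutor>());
```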
diff --git a/LLama/LLamaWeights.cs b/LLama/LLamaWeights.cs
index 1b067f1b..d841d5a9 100644
--- a/LLama/LLamaWeights.cs
+++ b/LLama/LLamaWeights.cs
@@ -3,6 +3,7 @@ using System.Text;
using LLama.Abstractions;
using LLama.Extensions;
using LLama.Native;
+using Microsoft.Extensions.Logging;
namespace LLama
{
@@ -72,10 +73,11 @@ namespace LLama
/// Create a llama_context using this model
/// </summary>
/// <param name="params"></param>
+ /// <param name="logger"></param>
/// <returns></returns>
- public LLamaContext CreateContext(IModelParams @params)
+ public LLamaContext CreateContext(IModelParams @params, ILogger logger = default!)
{
- return new LLamaContext(this, @params);
+ return new LLamaContext(this, @params, logger);
}
}
}
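Both call shapes stay valid after this change; default! on a reference type is simply null, matching the null! defaults used elsewhere in this diff. A sketch (logger is an assumed ILogger instance):

```csharp
using var context = model.CreateContext(parameters);          // logger defaults to null
using var logged  = model.CreateContext(parameters, logger);  // forwarded to LLamaContext
```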