Mirror of https://github.com/mtayfur/openwebui-memory-system.git (synced 2026-01-22 06:51:01 +01:00)
Update README and logging messages for model configuration clarity
@@ -66,7 +66,7 @@ Uses OpenWebUI's configured embedding model (supports Ollama, OpenAI, Azure Open
 ## Configuration
 
 Customize behavior through valves:
-- **model**: LLM for consolidation and reranking (default: `google/gemini-2.5-flash-lite`)
+- **model**: LLM for consolidation and reranking. Set to "Default" to use the current chat model, or specify a model ID to use that specific model
 - **max_message_chars**: Maximum message length before skipping operations (default: 2500)
 - **max_memories_returned**: Context injection limit (default: 10)
 - **semantic_retrieval_threshold**: Minimum similarity score (default: 0.5)
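For context, a minimal sketch of how valves like these might be declared, assuming the pydantic-based `Valves` pattern OpenWebUI filters typically use. Field names here follow the README list and are illustrative; note that the code hunks below refer to the model valve as `memory_model`.

```python
# Hypothetical valve declarations, assuming OpenWebUI's usual
# pydantic-based Valves pattern; names mirror the README list above.
from pydantic import BaseModel, Field

class Valves(BaseModel):
    model: str = Field(
        default="Default",
        description='LLM for consolidation and reranking; "Default" uses the current chat model.',
    )
    max_message_chars: int = Field(
        default=2500,
        description="Maximum message length before skipping operations.",
    )
    max_memories_returned: int = Field(
        default=10,
        description="Context injection limit.",
    )
    semantic_retrieval_threshold: float = Field(
        default=0.5,
        description="Minimum similarity score for retrieval.",
    )
```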
@@ -1665,7 +1665,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
 
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory : {model_to_use}")
 
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
@@ -1719,7 +1719,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
 
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory : {model_to_use}")
 
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
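Both hunks rely on the same fallback: a configured `memory_model` valve takes precedence, otherwise the model from the incoming request body is used. A self-contained illustration of that expression (the standalone function is ours, not the repo's):

```python
# Illustration of the fallback in the hunks above: a truthy
# memory_model wins; otherwise fall back to the request body's model.
def resolve_model(memory_model, body):
    return memory_model or (body.get("model") if isinstance(body, dict) else None)

assert resolve_model("google/gemini-2.5-flash-lite", {"model": "gpt-4o"}) == "google/gemini-2.5-flash-lite"
assert resolve_model("", {"model": "gpt-4o"}) == "gpt-4o"  # empty/falsy valve falls through to the chat model
assert resolve_model(None, []) is None  # non-dict body yields no model
```

Because the guard is `if self.valves.memory_model:`, the custom-model log line only fires when the valve is set, e.g. `🤖 Using the custom model for memory : google/gemini-2.5-flash-lite`.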