From fe3c47f6e42ed8bdc4f4c3879e1272f28261d684 Mon Sep 17 00:00:00 2001
From: mtayfur
Date: Tue, 28 Oct 2025 03:03:11 +0300
Subject: [PATCH] Update README and logging messages for model configuration
 clarity

---
 README.md        | 2 +-
 memory_system.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index c32a3ee..595536b 100644
--- a/README.md
+++ b/README.md
@@ -66,7 +66,7 @@ Uses OpenWebUI's configured embedding model (supports Ollama, OpenAI, Azure Open
 ## Configuration
 
 Customize behavior through valves:
-- **model**: LLM for consolidation and reranking (default: `google/gemini-2.5-flash-lite`)
+- **model**: LLM for consolidation and reranking. Set to "Default" to use the current chat model, or specify a model ID to use that specific model
 - **max_message_chars**: Maximum message length before skipping operations (default: 2500)
 - **max_memories_returned**: Context injection limit (default: 10)
 - **semantic_retrieval_threshold**: Minimum similarity score (default: 0.5)
diff --git a/memory_system.py b/memory_system.py
index 4485c61..f3a2bde 100644
--- a/memory_system.py
+++ b/memory_system.py
@@ -1665,7 +1665,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
 
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory: {model_to_use}")
 
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
 
@@ -1719,7 +1719,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
 
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory: {model_to_use}")
 
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
 
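For readers skimming the patch, the selection logic both hunks log is a plain or-fallback: the `memory_model` valve wins when set, otherwise the model from the request `body` is used. A minimal sketch of that behavior, where `Valves` and `pick_memory_model` are hypothetical stand-ins for the plugin's real valve config and inline expression:

```python
# Sketch of the model-selection fallback that the patched log lines report.
# `Valves` is a hypothetical stand-in for the plugin's valve configuration.
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class Valves:
    # Empty string plays the role of "Default": fall back to the chat model.
    memory_model: str = ""


def pick_memory_model(valves: Valves, body: Any) -> Optional[str]:
    """Return the custom memory model if the valve is set, else the chat model."""
    return valves.memory_model or (body.get("model") if isinstance(body, dict) else None)


# Valve unset: the current chat model from the request body is used.
assert pick_memory_model(Valves(), {"model": "gpt-4o-mini"}) == "gpt-4o-mini"

# Valve set: it overrides whatever model the request body carries,
# which is the case where the patched logger.info line fires.
assert (
    pick_memory_model(Valves(memory_model="google/gemini-2.5-flash-lite"), {"model": "gpt-4o-mini"})
    == "google/gemini-2.5-flash-lite"
)
```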