Update README and logging messages for model configuration clarity

mtayfur
2025-10-28 03:03:11 +03:00
parent 551b0c571b
commit fe3c47f6e4
2 changed files with 3 additions and 3 deletions


@@ -66,7 +66,7 @@ Uses OpenWebUI's configured embedding model (supports Ollama, OpenAI, Azure Open
 ## Configuration
 Customize behavior through valves:
-- **model**: LLM for consolidation and reranking (default: `google/gemini-2.5-flash-lite`)
+- **model**: LLM for consolidation and reranking. Set to "Default" to use the current chat model, or specify a model ID to use that specific model
 - **max_message_chars**: Maximum message length before skipping operations (default: 2500)
 - **max_memories_returned**: Context injection limit (default: 10)
 - **semantic_retrieval_threshold**: Minimum similarity score (default: 0.5)
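In OpenWebUI, a filter's valves are typically declared as a Pydantic model nested in the `Filter` class, with the defaults and descriptions shown above. A minimal sketch under that assumption — the field is named `memory_model` to match the code hunks below; the exact field names and how the "Default" sentinel is handled are illustrative, not taken from this commit:

```python
from pydantic import BaseModel, Field

class Filter:
    class Valves(BaseModel):
        # Empty/unset means: fall back to the current chat model
        # (the README exposes this as setting the valve to "Default")
        memory_model: str = Field(default="", description="LLM for consolidation and reranking")
        max_message_chars: int = Field(default=2500, description="Maximum message length before skipping operations")
        max_memories_returned: int = Field(default=10, description="Context injection limit")
        semantic_retrieval_threshold: float = Field(default=0.5, description="Minimum similarity score")

    def __init__(self):
        self.valves = self.Valves()
```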


@@ -1665,7 +1665,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory : {model_to_use}")
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
@@ -1719,7 +1719,7 @@ class Filter:
         model_to_use = self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)
         if self.valves.memory_model:
-            logger.info(f"🧠 Using the custom model for memory : {model_to_use}")
+            logger.info(f"🤖 Using the custom model for memory : {model_to_use}")
         await self._set_pipeline_context(__event_emitter__, __user__, model_to_use, __request__)
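Both hunks leave the surrounding selection logic untouched: a configured `memory_model` valve takes precedence, and an unset valve falls back to the model of the incoming request body. A self-contained sketch of that fallback — the expression mirrors the diff, while the standalone function and the usage assertions are hypothetical:

```python
import logging
from typing import Any, Optional

logger = logging.getLogger(__name__)

def resolve_memory_model(memory_model: str, body: Any) -> Optional[str]:
    """Pick the model for memory operations.

    An explicitly configured valve wins; an empty valve falls back to the
    model of the current chat request, mirroring the diff's
    `self.valves.memory_model or (body.get("model") if isinstance(body, dict) else None)`.
    """
    model_to_use = memory_model or (body.get("model") if isinstance(body, dict) else None)
    if memory_model:
        logger.info(f"🤖 Using the custom model for memory: {model_to_use}")
    return model_to_use

# Valve unset -> falls back to the chat request's model
assert resolve_memory_model("", {"model": "gpt-4o-mini"}) == "gpt-4o-mini"
# Valve set -> overrides the request model
assert resolve_memory_model("google/gemini-2.5-flash-lite", {"model": "gpt-4o-mini"}) == "google/gemini-2.5-flash-lite"
```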