Improve LangChain memory implementation

- Swap to RunnableWithMemory
- Add verbosity flag
This commit is contained in:
Alex Selimov 2025-07-18 22:21:31 -04:00
parent e59cf01ba9
commit 1c75cfc716
Signed by: aselimov
GPG key ID: 3DDB9C3E023F1F31
5 changed files with 81 additions and 51 deletions

View file

@@ -17,11 +17,12 @@ def test_chat_client(ollama_config, chat_client):
     if not is_ollama_available(ollama_config):
         pytest.skip("Local Ollama server is not available")
-    chat_client = chat_with_client(
+    response = chat_with_client(
         chat_client, "Tell me your name and introduce yourself briefly"
     )
-    response = chat_client.get_last_response_or_none()
+    response_from_history = chat_client.get_last_response_or_none().content
     assert response is not None
-    assert len(response.content) > 0
-    assert "gemma" in response.content.lower()
+    assert response == response_from_history
+    assert len(response) > 0
+    assert "gemma" in response.lower()