ReviewLlama/src/reviewllama/configs.py
Alex Selimov 1c75cfc716
Improve LangChain memory implementation
- Swap to RunnableWithMemory
- Add verbosity flag
2025-07-18 22:21:31 -04:00

67 lines
1.7 KiB
Python

import argparse
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
@dataclass(frozen=True)
class OllamaConfig:
    """Immutable settings describing how to talk to an Ollama server.

    Holds the connection endpoint plus the model names used for chat
    completions and for embedding generation.
    """

    chat_model: str       # model used for chat completions
    embedding_model: str  # model used for embedding generation
    base_url: str         # Ollama server endpoint, e.g. http://localhost:11434
    system_prompt: str    # system prompt prepended to every conversation
    # TODO: Update this to be a passed in value
    temperature: float = 0.7
@dataclass(frozen=True)
class ReviewConfig:
"""Complete configuration for ReviewLlama."""
paths: List[Path]
ollama: OllamaConfig
base_branch: str
verbose: bool
def create_ollama_config(
    model: str,
    server_url: str,
    system_prompt: str,
    temperature: float = 0.7,
    embedding_model: str = "nomic-embed-text",
) -> OllamaConfig:
    """Build an :class:`OllamaConfig` from individual settings.

    Args:
        model: Name of the chat model to use.
        server_url: Base URL of the Ollama server.
        system_prompt: System prompt prepended to conversations.
        temperature: Sampling temperature for the chat model.
        embedding_model: Name of the embedding model.

    Returns:
        A frozen OllamaConfig populated with the given values.
    """
    return OllamaConfig(
        chat_model=model,
        embedding_model=embedding_model,
        base_url=server_url,
        system_prompt=system_prompt,
        temperature=temperature,
    )
def create_review_config(
    paths: List[Path],
    ollama_config: OllamaConfig,
    base_branch: str,
    verbose: bool,
) -> ReviewConfig:
    """Assemble the complete :class:`ReviewConfig` from validated components.

    Args:
        paths: Repository/file paths to review.
        ollama_config: Pre-built Ollama client configuration.
        base_branch: Branch the changes are diffed against.
        verbose: Whether to emit extra logging.

    Returns:
        A frozen ReviewConfig wrapping the supplied components.
    """
    return ReviewConfig(
        paths=paths, ollama=ollama_config, base_branch=base_branch, verbose=verbose
    )
def namespace_to_config(namespace: argparse.Namespace) -> ReviewConfig:
    """Transform a parsed argparse namespace into a :class:`ReviewConfig`.

    Expects the namespace to carry ``paths``, ``model``, ``server_url``,
    ``system_prompt``, ``embedding_model``, ``base_branch`` and ``verbose``
    attributes, as defined by the CLI parser.

    Args:
        namespace: Parsed command-line arguments.

    Returns:
        The fully assembled review configuration.
    """
    paths = [Path(path_str) for path_str in namespace.paths]
    # Route through the factory so defaults (e.g. temperature) are applied
    # consistently with every other construction site.
    ollama_config = create_ollama_config(
        model=namespace.model,
        server_url=namespace.server_url,
        system_prompt=namespace.system_prompt,
        embedding_model=namespace.embedding_model,
    )
    return create_review_config(
        paths, ollama_config, namespace.base_branch, namespace.verbose
    )