diff --git a/src/reviewllama/cli.py b/src/reviewllama/cli.py index 289237d..bdac075 100644 --- a/src/reviewllama/cli.py +++ b/src/reviewllama/cli.py @@ -69,24 +69,13 @@ Examples: default=( "You are a PR review assistant in charge of software quality control. " "You analyze code changes in the context of the full code base to verify style, " - "syntax, and functionality. Your response should contain exactly 3 suggestions. " - "Each suggestion must be in the following format:\n" - "```diff\n" - "-\n" - "+\n" - "```\n" - "Reason: \n\n" - "Here are two examples of the required format:\n" - "```diff\n" - "-somvr = 2 + 2\n" - "+somevar = 2 + 2\n" - "```\n" - "Reason: somvr is likely a typo, try replacing with somevar\n\n" - "```diff\n" - "-add_two_numbers(\"1\", \"2\")\n" - "+add_two_numbers(1,2)\n" - "```\n" - "Reason: add_two_numbers requires numeric values and does not accept strings" + "syntax, and functionality. You respond with suggestions to improve code quality. " + "You only provide suggestions when you find a flaw in the code; otherwise you say that " + "no issues were found. Each suggestion should reference the old code and the new " + "suggested code. " + "Do not provide an analysis of the code and do not summarize suggestions. " + "Answer as briefly as possible and return only the suggestions in the requested format " + "with bullet points and no extra text. Provide examples when appropriate." 
), help="Base branch to compare against (default: %(default)s)", ) diff --git a/src/reviewllama/configs.py b/src/reviewllama/configs.py index 45e36cb..bce1009 100644 --- a/src/reviewllama/configs.py +++ b/src/reviewllama/configs.py @@ -13,7 +13,7 @@ class OllamaConfig: base_url: str system_prompt: str # TODO: Update this to be a passed in value - temperature: float = field(default=0.7) + temperature: float = field(default=0.0) @dataclass(frozen=True) @@ -30,7 +30,7 @@ def create_ollama_config( model: str, server_url: str, system_prompt: str, - temperature=0.7, + temperature=0.0, embedding_model="nomic-embed-text", ) -> OllamaConfig: """Create OllamaConfig with validated parameters.""" diff --git a/src/reviewllama/llm.py b/src/reviewllama/llm.py index f86cf3f..99e29c4 100644 --- a/src/reviewllama/llm.py +++ b/src/reviewllama/llm.py @@ -2,7 +2,7 @@ from dataclasses import dataclass from typing import Any from langchain_core.messages import BaseMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableLambda from langchain_core.runnables.base import RunnableSerializable from langchain_core.runnables.passthrough import RunnablePassthrough @@ -61,7 +61,6 @@ def chat_with_client( client: ChatClient, message: str, retriever: VectorStoreRetriever | None = None, - session_id: str = "default", verbose: bool = False, ) -> str: """Chat with the client and return the response content.""" @@ -72,7 +71,6 @@ def chat_with_client( response = client.chain.invoke( {"input": message, "context": context}, - config={"configurable": {"session_id": session_id}}, ) return response.content diff --git a/src/reviewllama/reviewllama.py b/src/reviewllama/reviewllama.py index a1cf01f..bab447d 100644 --- a/src/reviewllama/reviewllama.py +++ b/src/reviewllama/reviewllama.py @@ -55,11 +55,5 @@ def get_suggestions( def craft_message(diff) -> str: return ( - "Review the 
following code changes and make up to three suggestions on " - "how to improve it. If the code is sufficiently simple or accurate then say " - "no suggestions can be found. Important issues you should consider are consistent " - "style, introduction of syntax errors, and potentially breaking changes in " - "interfaces/APIs that aren't properly handled.\n\n" - f"The original code:\n```\n{diff.old_content}\n```\n" f"The new code:\n```\n{diff.new_content}```" )