commit c9a0d9a225dc6d5dda92452919ab0c36462fb14d
Author: Alex Selimov
Date:   Sat Jun 7 22:10:38 2025 -0400

    Initial commit with working cli prototype

diff --git a/roadmap.md b/roadmap.md
new file mode 100644
index 0000000..440edca
--- /dev/null
+++ b/roadmap.md
@@ -0,0 +1,61 @@
+# ReviewLlama Technical Roadmap
+
+## Stage 1: Project Setup & CLI Framework
+- [x] Initialize project structure with proper package management
+- [x] Implement argument parsing for directory, model, and Ollama server parameters
+- [x] Create basic CLI interface with help documentation
+- [ ] Set up logging and error handling framework
+
+## Stage 2: Git Integration
+- [ ] Implement git repository detection and validation
+- [ ] Build diff extraction functionality between current branch and origin
+- [ ] Parse git diff output into structured format (files, hunks, additions/deletions)
+- [ ] Handle edge cases (new files, deletions, binary files, merge conflicts)
+
+## Stage 3: Ollama Client Integration
+- [ ] Create HTTP client for Ollama API communication
+- [ ] Implement model availability checking and validation
+- [ ] Build request/response handling with proper error management
+- [ ] Add connection testing and retry logic
+
+## Stage 4: Code Context Analysis & RAG Preparation
+- [ ] Implement file parsing and syntax tree generation for major languages
+- [ ] Build code context extraction (function signatures, class definitions, imports)
+- [ ] Create code chunking strategy for large files
+- [ ] Develop dependency graph analysis for related code understanding
+
+## Stage 5: RAG Implementation
+- [ ] Design vector embedding strategy for code snippets
+- [ ] Implement local vector storage (SQLite + embeddings or similar)
+- [ ] Build context retrieval system based on code similarity
+- [ ] Create context ranking and selection algorithms
+
+## Stage 6: Review Generation Engine
+- [ ] Design prompt templates for different review types (security, performance, style, logic)
+- [ ] Implement review request formatting with context injection
+- [ ] Build response parsing and suggestion extraction
+- [ ] Create confidence scoring for suggestions
+
+## Stage 7: Interactive Review Interface
+- [ ] Implement terminal UI for displaying suggestions
+- [ ] Build yes/no selection system with keyboard navigation
+- [ ] Create suggestion categorization and filtering
+- [ ] Add batch accept/reject functionality
+
+## Stage 8: Review Application System
+- [ ] Implement automatic code modification for accepted suggestions
+- [ ] Create backup and rollback mechanisms
+- [ ] Build conflict resolution for overlapping changes
+- [ ] Add preview mode for showing proposed changes
+
+## Stage 9: Configuration & Persistence
+- [ ] Create configuration file system for user preferences
+- [ ] Implement review history and suggestion tracking
+- [ ] Build ignore patterns and custom rule systems
+- [ ] Add project-specific configuration support
+
+## Stage 10: Testing & Polish
+- [ ] Comprehensive unit and integration testing
+- [ ] Performance optimization for large repositories
+- [ ] Error handling refinement and user experience improvements
+- [ ] Documentation and installation packaging
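
Stage 3 above (Ollama client integration) is not implemented in this commit. As a rough sketch of the model-availability check it calls for, the snippet below queries Ollama's /api/tags listing endpoint using only the standard library; the function name and exact response handling are assumptions, not code from this repository.

    import json
    import urllib.request


    def is_model_available(server_url: str, model: str, timeout: int = 30) -> bool:
        """Return True if the requested model is already present on the Ollama server."""
        # Ollama lists locally available models at GET /api/tags (assumed response
        # shape: {"models": [{"name": "llama3.2:3b", ...}, ...]}).
        request = urllib.request.Request(f"{server_url}/api/tags")
        with urllib.request.urlopen(request, timeout=timeout) as response:
            payload = json.load(response)
        names = [entry.get("name", "") for entry in payload.get("models", [])]
        return model in names

    # Example: is_model_available("http://localhost:11434", "llama3.2:3b")
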
diff --git a/src/reviewllama/__init__.py b/src/reviewllama/__init__.py
new file mode 100644
index 0000000..06f2620
--- /dev/null
+++ b/src/reviewllama/__init__.py
@@ -0,0 +1,5 @@
+from reviewllama.cli import cli
+
+
+def main() -> None:
+    cli()
diff --git a/src/reviewllama/cli.py b/src/reviewllama/cli.py
new file mode 100644
index 0000000..ceaffd9
--- /dev/null
+++ b/src/reviewllama/cli.py
@@ -0,0 +1,146 @@
+import argparse
+from dataclasses import dataclass
+from typing import List, Optional
+from pathlib import Path
+import sys
+
+
+@dataclass(frozen=True)
+class OllamaConfig:
+    """Configuration for Ollama client."""
+
+    model: str
+    server_url: str
+    timeout: int
+    max_retries: int
+
+
+@dataclass(frozen=True)
+class ReviewConfig:
+    """Complete configuration for ReviewLlama."""
+
+    paths: List[Path]
+    ollama: OllamaConfig
+
+
+def normalize_server_url(url: str) -> str:
+    """Normalize Ollama server URL to ensure proper format."""
+    if not url.startswith(("http://", "https://")):
+        return f"http://{url}"
+    return url.rstrip("/")
+
+
+def create_ollama_config(
+    model: str, server_url: str, timeout: int, max_retries: int
+) -> OllamaConfig:
+    """Create OllamaConfig with validated parameters."""
+    return OllamaConfig(
+        model=model,
+        server_url=normalize_server_url(server_url),
+        timeout=timeout,
+        max_retries=max_retries,
+    )
+
+
+def create_review_config(
+    paths: List[Path], ollama_config: OllamaConfig
+) -> ReviewConfig:
+    """Create complete ReviewConfig from validated components."""
+    return ReviewConfig(paths=paths, ollama=ollama_config)
+
+
+def create_argument_parser() -> argparse.ArgumentParser:
+    """Create and configure the argument parser."""
+    parser = argparse.ArgumentParser(
+        prog="reviewllama",
+        description="AI-powered code review assistant",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  reviewllama . --model gemma3:27b --server localhost:11434
+  reviewllama src/ tests/ --model llama3.2:7b --timeout 60
+        """,
+    )
+
+    parser.add_argument(
+        "paths",
+        nargs="+",
+        metavar="PATH",
+        help="One or more file paths or git directories to review",
+    )
+
+    parser.add_argument(
+        "--model",
+        default="llama3.2:3b",
+        help="Ollama model to use for code review (default: %(default)s)",
+    )
+
+    parser.add_argument(
+        "--server",
+        dest="server_url",
+        default="localhost:11434",
+        help="Ollama server URL (default: %(default)s)",
+    )
+
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=30,
+        help="Request timeout in seconds (default: %(default)s)",
+    )
+
+    parser.add_argument(
+        "--max-retries",
+        dest="max_retries",
+        type=int,
+        default=3,
+        help="Maximum number of retry attempts (default: %(default)s)",
+    )
+
+    return parser
+
+
+def parse_raw_arguments(args: Optional[List[str]] = None) -> argparse.Namespace:
+    """Parse command line arguments into raw namespace."""
+    parser = create_argument_parser()
+    return parser.parse_args(args)
+
+
+def transform_namespace_to_config(namespace: argparse.Namespace) -> ReviewConfig:
+    """Transform argparse namespace into ReviewConfig."""
+    paths = [Path(path_str) for path_str in namespace.paths]
+
+    ollama_config = create_ollama_config(
+        model=namespace.model,
+        server_url=namespace.server_url,
+        timeout=namespace.timeout,
+        max_retries=namespace.max_retries,
+    )
+
+    return create_review_config(paths=paths, ollama_config=ollama_config)
+
+
+def parse_arguments(args: Optional[List[str]] = None) -> ReviewConfig:
+    """Parse command line arguments and return validated configuration."""
+    raw_namespace = parse_raw_arguments(args)
+    return transform_namespace_to_config(raw_namespace)
+
+
+def cli() -> None:
+    """Main entry point for the CLI."""
+    try:
+        config = parse_arguments()
+        # TODO: Pass config to review engine
+        print(f"Reviewing {len(config.paths)} path(s) with model {config.ollama.model}")
+        for path in config.paths:
+            print(f"  - {path}")
+    except SystemExit:
+        # argparse calls sys.exit on error, let it propagate
+        raise
+    except Exception as e:
+        print(f"Error: {e}", file=sys.stderr)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    cli()
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..bf575ff
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,8 @@
+version = 1
+revision = 1
+requires-python = ">=3.13"
+
+[[package]]
+name = "reviewllama"
+version = "0.1.0"
+source = { editable = "." }
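
As a quick sanity check of the parsing pipeline in cli.py, the snippet below feeds parse_arguments an explicit argument list mirroring the second epilog example and inspects the resulting ReviewConfig; it is illustrative only and not part of the commit.

    from reviewllama.cli import parse_arguments

    # Two paths, explicit model and timeout; server falls back to the default.
    config = parse_arguments(["src/", "tests/", "--model", "llama3.2:7b", "--timeout", "60"])

    assert [str(p) for p in config.paths] == ["src", "tests"]
    assert config.ollama.model == "llama3.2:7b"
    # The bare host:port default is normalized to a full URL.
    assert config.ollama.server_url == "http://localhost:11434"
    assert config.ollama.timeout == 60 and config.ollama.max_retries == 3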