Skip to content

Commit 374d3eb

Browse files
committed
update the llm setup
1 parent 8583db8 commit 374d3eb

File tree

2 files changed

+51
-12
lines changed

2 files changed

+51
-12
lines changed

README.md

Lines changed: 17 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -70,18 +70,23 @@ This is a tutorial project of [Pocket Flow](https://github.com/The-Pocket/Pocket
7070
pip install -r requirements.txt
7171
```
7272

73-
3. Generate a complete codebase tutorial by running the main script:
74-
```bash
75-
python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
76-
```
77-
- `repo_url` - URL of the GitHub repository (required)
78-
- `-n, --name` - Project name (optional, derived from URL if omitted)
79-
- `-t, --token` - GitHub token (or set GITHUB_TOKEN environment variable)
80-
- `-o, --output` - Output directory (default: ./output)
81-
- `-i, --include` - Files to include (e.g., "*.py" "*.js")
82-
- `-e, --exclude` - Files to exclude (e.g., "tests/*" "docs/*")
83-
- `-s, --max-size` - Maximum file size in bytes (default: 100KB)
84-
73+
3. Set up LLM in [`utils/call_llm.py`](./utils/call_llm.py) by providing credentials (API key or project name). We highly recommend the latest models with thinking capabilities (Gemini 2.5 Pro, Claude 3.7 Sonnet with extended thinking, OpenAI o1). You can verify if it is correctly set up by running:
74+
```bash
75+
python utils/call_llm.py
76+
```
77+
78+
4. Generate a complete codebase tutorial by running the main script:
79+
```bash
80+
python main.py https://github.com/username/repo --include "*.py" "*.js" --exclude "tests/*" --max-size 50000
81+
```
82+
- `repo_url` - URL of the GitHub repository (required)
83+
- `-n, --name` - Project name (optional, derived from URL if omitted)
84+
- `-t, --token` - GitHub token (or set GITHUB_TOKEN environment variable)
85+
- `-o, --output` - Output directory (default: ./output)
86+
- `-i, --include` - Files to include (e.g., "*.py" "*.js")
87+
- `-e, --exclude` - Files to exclude (e.g., "tests/*" "docs/*")
88+
- `-s, --max-size` - Maximum file size in bytes (default: 100KB)
89+
8590
The application will crawl the repository, analyze the codebase structure, generate tutorial content, and save the output in the specified directory (default: ./output).
8691

8792

utils/call_llm.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
# Simple cache configuration
2121
cache_file = "llm_cache.json"
2222

23+
# By default, we use Google Gemini 2.5 Pro, as it shows great performance for code understanding
2324
def call_llm(prompt: str, use_cache: bool = True) -> str:
2425
# Log the prompt
2526
logger.info(f"PROMPT: {prompt}")
@@ -43,6 +44,7 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
4344
# Call the LLM if not in cache or cache disabled
4445
client = genai.Client(
4546
vertexai=True,
47+
# TODO: change to your own project id and location
4648
project=os.getenv("GEMINI_PROJECT_ID", "your-project-id"),
4749
location=os.getenv("GEMINI_LOCATION", "us-central1")
4850
)
@@ -77,6 +79,38 @@ def call_llm(prompt: str, use_cache: bool = True) -> str:
7779

7880
return response_text
7981

82+
# # Use Anthropic Claude 3.7 Sonnet Extended Thinking
83+
# def call_llm(prompt, use_cache: bool = True):
84+
# from anthropic import Anthropic
85+
# client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY", "your-api-key"))
86+
# response = client.messages.create(
87+
# model="claude-3-7-sonnet-20250219",
88+
# max_tokens=21000,
89+
# thinking={
90+
# "type": "enabled",
91+
# "budget_tokens": 20000
92+
# },
93+
# messages=[
94+
# {"role": "user", "content": prompt}
95+
# ]
96+
# )
97+
# return response.content[1].text
98+
99+
# # Use OpenAI o1
100+
# def call_llm(prompt, use_cache: bool = True):
101+
# from openai import OpenAI
102+
# client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
103+
# r = client.chat.completions.create(
104+
# model="o1",
105+
# messages=[{"role": "user", "content": prompt}],
106+
# response_format={
107+
# "type": "text"
108+
# },
109+
# reasoning_effort="medium",
110+
# store=False
111+
# )
112+
# return r.choices[0].message.content
113+
80114
if __name__ == "__main__":
81115
test_prompt = "Hello, how are you?"
82116

0 commit comments

Comments
 (0)