Commit 5d46e12
Parent(s): be53f95

formatting fixes

Files changed:
- app.py +291 -153
- config/clients/gemini_cli.py +15 -5
- config/clients/nebius_cli.py +14 -18
- config/clients/openai_cli.py +13 -17
- config/clients/sambanova_cli.py +26 -10
- config/constants.py +17 -10
- config/database.py +4 -1
- config/database_sqlite.py +1 -1
- config/model_factory.py +12 -5
- requirements.txt +0 -1

app.py CHANGED
@@ -38,14 +38,15 @@ def get_available_models():
     return {
         "text_models": AVAILABLE_ISSUE_MODELS_BY_PROVIDER,
         "vision_models": AVAILABLE_SKETCH_MODELS_BY_PROVIDER,
-        "providers": list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys())
+        "providers": list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
     }
-
+
+
 def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None, desc: str = ""):
     """Helper function to fetch all pages from a paginated GitHub API endpoint."""
     results = []
     page_num = 1
-    base_url = url.split(
+    base_url = url.split("&page=")[0]
 
     while True:
         paginated_url = f"{base_url}&page={page_num}"

@@ -67,14 +68,18 @@ def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None
     return results
 
 
-def sync_repository(
+def sync_repository(
+    repo_url: str,
+    github_token: str | None = None,
+    progress=gr.Progress(track_tqdm=True),
+):
     """
     Performs a lightweight sync of a GitHub repository with the local database.
-
+
     Args:
         repo_url (str): The full URL of the GitHub repository (e.g., 'https://github.com/gradio-app/gradio').
         github_token (str, optional): A GitHub Personal Access Token. Optional. If not provided, the tool will work but may hit public API rate limits.
-
+
     Returns:
         str: A status message indicating completion.
     """

@@ -137,7 +142,13 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
                     VALUES (%s, %s, %s, %s, %s)
                     ON CONFLICT (tag_name) DO NOTHING;
                 """,
-                (
+                (
+                    repo_slug,
+                    release["tag_name"],
+                    release["name"],
+                    release.get("body"),
+                    release["published_at"],
+                ),
             )
             conn.commit()
             conn.close()
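
The reflowed tuple above feeds a parameterized upsert; the %s placeholders and the ON CONFLICT clause imply a Postgres driver in the psycopg2 family. A self-contained sketch of the same pattern — the connection string and sample values are illustrative, and the column names are inferred from the tuple above rather than confirmed against the schema:

    import psycopg2  # assumed driver, given the %s placeholder style

    conn = psycopg2.connect("dbname=cache")
    cur = conn.cursor()
    cur.execute(
        """
        INSERT INTO releases (repo, tag_name, name, body, published_at)
        VALUES (%s, %s, %s, %s, %s)
        ON CONFLICT (tag_name) DO NOTHING;
        """,
        ("owner/repo", "v1.0.0", "v1.0.0", "Release notes...", "2024-01-01T00:00:00Z"),
    )
    conn.commit()
    conn.close()

Passing values as a tuple, instead of interpolating them into the SQL string, lets the driver handle quoting and prevents SQL injection.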

@@ -145,14 +156,15 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
 
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)
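
This except-block shape — print the traceback, classify the message text, re-raise as a user-facing gr.Error — recurs in every tool below. The commit only reformats it; a hedged sketch of how the repeated logic could be centralized (the helper name is hypothetical, not something in this codebase):

    import gradio as gr

    def _raise_user_error(e: Exception, llm_provider: str | None = None):
        # Hypothetical helper: map common failure substrings to friendly messages.
        msg = str(e)
        if llm_provider and ("Auth" in msg or "Key" in msg or "401" in msg):
            detail = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
        elif "429" in msg or "Rate" in msg:
            detail = "Rate Limit Error: The AI provider is busy. Please wait a moment."
        else:
            detail = f"System Error: {msg}"
        raise gr.Error(detail)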

@@ -164,7 +176,7 @@ def analyze_github_issue(
     github_token: str | None = None,
     llm_api_key: str | None = None,
     request: gr.Request = None,
-    progress=gr.Progress(track_tqdm=True)
+    progress=gr.Progress(track_tqdm=True),
 ):
     """
     Analyzes a single GitHub issue to determine its resolution status by checking PRs and Releases.

@@ -176,16 +188,15 @@ def analyze_github_issue(
         llm_model (str, optional): The specific model name. Defaults to 'gemini-2.0-flash'.
         github_token (str, optional): GitHub Token. Optional. Recommended for higher rate limits.
         llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided (empty string), the server will use its internal environment keys to perform the analysis.
-
+
     Returns:
         str: An HTML-formatted analysis report.
     """
-
-    _validate_api_keys(llm_api_key, request)
-
-    if not repo_url or not issue_number:
-        return "## Error\nPlease provide both a repository URL and an issue number."
+    _validate_api_keys(llm_api_key, request)
 
+    if not repo_url or not issue_number:
+        return "## Error\nPlease provide both a repository URL and an issue number."
+    try:
         repo_slug = "/".join(repo_url.strip().replace("https://github.com/", "").split("/")[:2])
         headers = {"Authorization": f"token {github_token}"} if github_token else {}
         conn = connect()

@@ -214,14 +225,20 @@ def analyze_github_issue(
             pr_urls_to_fetch.append(f"https://api.github.com/repos/{repo_slug}/pulls/{pr_num}")
 
         for i, url in enumerate(pr_urls_to_fetch):
-            progress(
+            progress(
+                0.4 + (0.2 * (i / len(pr_urls_to_fetch))) if pr_urls_to_fetch else 0.4,
+                desc=f"Fetching details for PR {i + 1}/{len(pr_urls_to_fetch)}...",
+            )
             pr_res = requests.get(url, headers=headers)
             if pr_res.status_code == 200:
                 linked_prs_details.append(pr_res.json())
 
         # 2. GET RELEASE DATA FROM CACHE
         progress(0.6, desc="Querying local release cache...")
-        cursor.execute(
+        cursor.execute(
+            "SELECT * FROM releases WHERE repo = %s ORDER BY published_at DESC LIMIT 50",
+            (repo_slug,),
+        )
         releases = cursor.fetchall()
         conn.close()
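
The progress(...) calls above use Gradio's callable progress tracker: the first argument is a fraction between 0 and 1, and desc is the label shown in the UI. A minimal usage sketch of that API:

    import gradio as gr

    def long_task(progress=gr.Progress()):
        progress(0.0, desc="Starting...")
        items = list(range(10))
        for i, _ in enumerate(items):
            # Scale the loop into a sub-range, as the analyzer does for PR fetches.
            progress(0.4 + 0.2 * (i / len(items)), desc=f"Step {i + 1}/{len(items)}")
        return "done"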

@@ -237,7 +254,7 @@ def analyze_github_issue(
                 llm_model,
                 messages=[{"role": "user", "content": summarizer_prompt}],
                 temperature=0.0,
-                api_key=llm_api_key
+                api_key=llm_api_key,
             )
         except Exception:
             additional_problems_summary = "Could not summarize comments due to an error."

@@ -258,7 +275,10 @@ def analyze_github_issue(
 
         release_notes_summary = (
             "\n\n".join(
-                [
+                [
+                    f"- **Release {r['tag_name']} ({str(r['published_at'])[:10]}):**\n{str(r['body'])[:500] if r['body'] else 'No description.'}..."
+                    for r in releases
+                ]
             )
             if releases
             else "No releases found in local cache."

@@ -270,7 +290,7 @@ def analyze_github_issue(
             "issue_state": issue_data["state"],
             "issue_body": issue_data["body"] or "No description provided.",
             "additional_problems_summary": additional_problems_summary.strip(),
-            "issue_labels": json.dumps([label["name"] for label in issue_data["labels"]]) if issue_data["labels"] else "None",
+            "issue_labels": (json.dumps([label["name"] for label in issue_data["labels"]]) if issue_data["labels"] else "None"),
             "pull_requests_summary": pull_requests_summary,
             "release_notes_summary": release_notes_summary,
         }

@@ -286,9 +306,9 @@ def analyze_github_issue(
             messages=messages,
             temperature=0.1,
             max_tokens=2048,
-            api_key=llm_api_key
+            api_key=llm_api_key,
         )
-
+
         report_start_marker = f"## Analysis of Issue #{issue_number}"
         report_start_index = raw_output.find(report_start_marker)

@@ -316,16 +336,17 @@ def analyze_github_issue(
 
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
             detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
         elif "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)

@@ -337,11 +358,11 @@ def find_duplicate_issues(
     github_token: str | None = None,
     llm_api_key: str | None = None,
     request: gr.Request = None,
-    progress=gr.Progress()
+    progress=gr.Progress(),
 ):
     """
     Finds potential duplicate issues for a given issue using mentions and keyword search.
-
+
     Args:
         repo_url (str): The full URL of the GitHub repository.
         issue_number (str): The main issue number to check.

@@ -349,12 +370,12 @@ def find_duplicate_issues(
         llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
         github_token (str, optional): GitHub Token. Optional.
         llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
-
+
     Returns:
         str: A Markdown list of potential duplicates.
     """
     _validate_api_keys(llm_api_key, request)
-
+
     if not repo_url or not issue_number:
         return "Please provide the repository and the main issue number."

@@ -366,9 +387,12 @@ def find_duplicate_issues(
     cursor = conn.cursor()
 
     progress(0, desc="Fetching main issue from cache...")
-    cursor.execute(
+    cursor.execute(
+        "SELECT * FROM items WHERE repo = %s AND number = %s",
+        (repo_slug, issue_number),
+    )
     main_issue = cursor.fetchone()
-
+
     if not main_issue:
         conn.close()
         return "Main issue not found in cache. Please synchronize the repository first."
@@ -426,46 +450,48 @@ def find_duplicate_issues(
             llm_model,
             messages=messages,
             temperature=0.0,
-            api_key=llm_api_key
+            api_key=llm_api_key,
         )
         return analysis
 
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
             detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
         elif "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)
 
+
 def prioritize_open_issues(
     repo_url: str,
     llm_provider: str = "gemini",
     llm_model: str = "gemini-2.0-flash",
     llm_api_key: str | None = None,
     request: gr.Request = None,
-    progress=gr.Progress(track_tqdm=True)
+    progress=gr.Progress(track_tqdm=True),
 ):
     """
     Analyzes open issues from the cache to create a prioritized backlog.
-
+
     Args:
         repo_url (str): The URL of the GitHub repository.
        llm_provider (str, optional): The LLM provider. Defaults to 'gemini'.
        llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
        llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
-
+
     Returns:
         str: A Markdown priority list.
     """
     _validate_api_keys(llm_api_key, request)
-
+
     if not repo_url:
         return "Please provide the repository URL."

@@ -476,7 +502,8 @@ def prioritize_open_issues(
 
         progress(0, desc="Fetching open issues from cache...")
         cursor.execute(
-            "SELECT * FROM items WHERE repo = %s AND is_pr = FALSE AND state = 'open' ORDER BY comments DESC, reactions DESC LIMIT 50",
+            "SELECT * FROM items WHERE repo = %s AND is_pr = FALSE AND state = 'open' ORDER BY comments DESC, reactions DESC LIMIT 50",
+            (repo_slug,),
         )
         open_issues = cursor.fetchall()
         conn.close()

@@ -505,36 +532,38 @@ def prioritize_open_issues(
             messages=messages,
             temperature=0.1,
             max_tokens=4096,
-            api_key=llm_api_key
+            api_key=llm_api_key,
         )
         return analysis
 
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
             detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
         elif "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)
 
+
 def reply_and_close_issue(
     repo_url: str,
     issue_number: str,
     comment_body: str,
     close_issue: bool = False,
     github_token: str | None = None,
-    request: gr.Request = None
+    request: gr.Request = None,
 ):
     """
     Posts a comment on a GitHub issue and optionally closes it.
     Crucial for the 'Actionable AI' capability.
-
+
     Args:
         repo_url (str): Full repository URL.
         issue_number (str): The issue number.
@@ -551,7 +580,7 @@ def reply_and_close_issue(
         repo_slug = f"{owner}/{repo}"
         headers = {
             "Authorization": f"token {github_token}",
-            "Accept": "application/vnd.github.v3+json"
+            "Accept": "application/vnd.github.v3+json",
         }
 
         # 1. Post the Comment

@@ -567,10 +596,14 @@ def reply_and_close_issue(
         if close_issue:
             issue_url = f"https://api.github.com/repos/{repo_slug}/issues/{issue_number}"
             # state_reason can be 'completed' or 'not_planned'
-            close_resp = requests.patch(
+            close_resp = requests.patch(
+                issue_url,
+                headers=headers,
+                json={"state": "closed", "state_reason": "completed"},
+            )
             close_resp.raise_for_status()
             action_log += f"\n🔒 Issue #{issue_number} has been CLOSED."
-
+
         return f"## Success\n{action_log}"
 
     except requests.exceptions.HTTPError as e:
@@ -581,19 +614,21 @@ def reply_and_close_issue(
         raise gr.Error(f"GitHub API Error: {e}")
     except Exception as e:
         import traceback
+
         traceback.print_exc()
         raise gr.Error(f"System Error: {str(e)}")
 
+
 def generate_theme(
     prompt: str,
     llm_provider: str = "gemini",
     llm_model: str = "gemini-2.0-flash",
     llm_api_key: str | None = None,
-    request: gr.Request = None
+    request: gr.Request = None,
 ):
     """
     Generates a Gradio theme based on a text prompt.
-
+
     Args:
         prompt (str): Description of the desired theme.
         llm_provider (str, optional): LLM provider. Defaults to 'gemini'.

@@ -601,10 +636,20 @@ def generate_theme(
         llm_api_key (str, optional): API Key. **OPTIONAL**. If empty, uses server keys.
     """
     _validate_api_keys(llm_api_key, request)
-
+
     try:
-        messages = [
-
+        messages = [
+            {"role": "system", "content": SYSTEM_PROMPT_THEME_GENERATOR_EN},
+            {"role": "user", "content": prompt},
+        ]
+        llm_output = LLMFactory.call(
+            llm_provider=llm_provider,
+            llm_model=llm_model,
+            messages=messages,
+            temperature=0.1,
+            max_tokens=2048,
+            api_key=llm_api_key,
+        )
 
         if isinstance(llm_output, str) and llm_output.startswith("Error:"):
             raise Exception(llm_output)

@@ -629,11 +674,11 @@ def generate_theme(
 
         if "font" in constructor_kwargs:
             fonts = constructor_kwargs.get("font", [])
-            constructor_kwargs["font"] = [gr.themes.GoogleFont(name) if name not in ["ui-sans-serif", "system-ui", "sans-serif"] else name for name in fonts]
+            constructor_kwargs["font"] = [(gr.themes.GoogleFont(name) if name not in ["ui-sans-serif", "system-ui", "sans-serif"] else name) for name in fonts]
         if "font_mono" in constructor_kwargs:
             mono_fonts = constructor_kwargs.get("font_mono", [])
             constructor_kwargs["font_mono"] = [
-                gr.themes.GoogleFont(name) if name not in ["ui-monospace", "Consolas", "monospace"] else name for name in mono_fonts
+                (gr.themes.GoogleFont(name) if name not in ["ui-monospace", "Consolas", "monospace"] else name) for name in mono_fonts
             ]
 
         theme_object = gr.themes.Default(**constructor_kwargs)

@@ -644,9 +689,10 @@ def generate_theme(
         css_str = theme_object._get_theme_css()
 
         def format_arg(val):
-            if isinstance(val, str):
+            if isinstance(val, str):
+                return f"'{val}'"
             if isinstance(val, list):
-                font_list = [f"gr.themes.GoogleFont('{f.name}')" if isinstance(f, gr.themes.GoogleFont) else f"'{f}'" for f in val]
+                font_list = [(f"gr.themes.GoogleFont('{f.name}')" if isinstance(f, gr.themes.GoogleFont) else f"'{f}'") for f in val]
                 return f"[{', '.join(font_list)}]"
             return str(val)

@@ -659,16 +705,17 @@ def generate_theme(
 
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
             detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
         elif "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)

@@ -695,11 +742,11 @@ def generate_ui_from_sketch(
     llm_provider: str = "sambanova",
     llm_model: str = "Llama-4-Maverick-17B-128E-Instruct",
     llm_api_key: str | None = None,
-    request: gr.Request = None
+    request: gr.Request = None,
 ):
     """
     Generates Python code for a Gradio UI from a sketch image.
-
+
     Args:
         sketch_image (Image): The input sketch image.
         text_description (str): Optional text description.

@@ -715,10 +762,21 @@ def generate_ui_from_sketch(
     try:
         messages = [
             {"role": "system", "content": SYSTEM_PROMPT_SKETCH_EN},
-            {
+            {
+                "role": "user",
+                "content": f"Additional text description: {text_description if text_description else 'None'}",
+            },
         ]
 
-        llm_output = LLMFactory.call(
+        llm_output = LLMFactory.call(
+            llm_provider=llm_provider,
+            llm_model=llm_model,
+            messages=messages,
+            image=sketch_image,
+            temperature=0.1,
+            max_tokens=8192,
+            api_key=llm_api_key,
+        )
 
         if isinstance(llm_output, str) and llm_output.startswith("Error:"):
             raise Exception(llm_output)

@@ -727,16 +785,17 @@ def generate_ui_from_sketch(
             return code.strip()
     except Exception as e:
         import traceback
+
         traceback.print_exc()
-
+
         error_msg = str(e)
         if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
             detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
         elif "429" in error_msg or "Rate" in error_msg:
-            detailed_msg =
+            detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
         else:
             detailed_msg = f"System Error: {error_msg}"
-
+
         raise gr.Error(detailed_msg)
@@ -745,17 +804,24 @@ def generate_ui_from_sketch(
 
 # region HELPER FUNCTIONS
 def _validate_api_keys(llm_api_key, request):
-    USE_SERVER_KEYS = os.getenv("USE_SERVER_KEYS", "false").lower() in (
+    USE_SERVER_KEYS = os.getenv("USE_SERVER_KEYS", "false").lower() in (
+        "True",
+        "true",
+        "1",
+        "yes",
+    )
     if not USE_SERVER_KEYS and request and request.headers.get("referer"):
         if not llm_api_key or not llm_api_key.strip():
             raise gr.Error("⚠️ LLM API Key Required! Please enter your own API Key to use this tool in the demo UI.")
-
+
+
 def _check_env():
     if not os.getenv("GOOGLE_API_KEY"):
         print("WARNING: The GOOGLE_API_KEY environment variable was not set.")
     if not os.getenv("SAMBANOVA_API_KEY"):
         print("WARNING: The SAMBANOVA_API_KEY environment variable was not set.")
 
+
 def _inject_theme_helper_js():
     js_code = """
     function apply_gradio_theme(css) {
@@ -771,16 +837,26 @@ def _inject_theme_helper_js():
     """
     return {"js": js_code}
 
+
 def _add_interactive_inputs():
     with gr.Row():
         with gr.Column(variant="panel", scale=1):
             gr.Markdown("### Interactive Controls")
             radio = gr.Radio(["A", "B", "C"], label="Radio")
             drop = gr.Dropdown(["Option 1", "Option 2"], show_label=False)
-            drop_2 = gr.Dropdown(
+            drop_2 = gr.Dropdown(
+                ["A", "B", "C"],
+                multiselect=True,
+                value=["A"],
+                label="Multiple Dropdown",
+            )
             check = gr.Checkbox(label="Checkbox")
         with gr.Column(variant="panel", scale=2):
-            gr.Image(
+            gr.Image(
+                "https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpg",
+                label="Image",
+                height=320,
+            )
             with gr.Row():
                 gr.Button("Primary", variant="primary")
                 gr.Button("Secondary")

@@ -790,18 +866,29 @@ def _add_interactive_inputs():
                 gr.Button("Stop", variant="stop", size="sm")
     return radio, drop, drop_2, check
 
+
 def _add_data_display_components():
     with gr.Row():
         gr.Dataframe(value=[[1, 2], [3, 4]], label="Dataframe"), gr.JSON(value={"a": 1}), gr.Label(value={"cat": 0.7}), gr.File()
 
+
 def _add_media_components():
     with gr.Row():
         (
             gr.ColorPicker(label="Color Picker"),
             gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4"),
-            gr.Gallery(
+            gr.Gallery(
+                [
+                    (
+                        "https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg",
+                        "lion",
+                    )
+                ],
+                height="200px",
+            ),
         )
 
+
 def _add_chatbot_component():
     with gr.Row():
         with gr.Column(scale=2):

@@ -813,19 +900,35 @@ def _add_chatbot_component():
             gr.Slider(label="Temperature"), gr.Checkbox(label="Streaming")
     return chatbot, msg_input, add_msg_btn
 
+
 def _create_example_app():
     with gr.Column(scale=3, elem_id="app"):
         with gr.Tabs():
             with gr.TabItem("Common Inputs"):
-                gr.Textbox(
-
+                gr.Textbox(
+                    label="Text Box",
+                    info="A standard text field.",
+                    placeholder="Write something...",
+                )
+                gr.Interface(
+                    lambda x: x,
+                    "number",
+                    "textbox",
+                    title="Interface Component (Compact)",
+                    show_api=False,
+                )
                 with gr.Row():
                     gr.Slider(label="Slider 1")
                     gr.Slider(label="Slider 2")
                 gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
                 radio, drop, drop_2, check = _add_interactive_inputs()
                 gr.Examples(
-                    examples=[
+                    examples=[
+                        ["A", "Option 1", ["B"], True],
+                        ["B", "Option 2", ["A", "C"], False],
+                    ],
+                    inputs=[radio, drop, drop_2, check],
+                    label="Input Examples",
                 )
             with gr.TabItem("Data and Media"):
                 _add_data_display_components()

@@ -834,6 +937,7 @@ def _create_example_app():
         chatbot, msg_input, add_msg_btn = _add_chatbot_component()
     return chatbot, msg_input, add_msg_btn
 
+
 def _create_gradio_lite_html(python_code: str) -> str:
     """
     Wraps the Python code in a Gradio-Lite HTML structure with an iframe.
@@ -841,7 +945,7 @@ def _create_gradio_lite_html(python_code: str) -> str:
     """
     # Escape any existing script tags to prevent injection issues inside srcdoc
     safe_code = python_code.replace("<", "&lt;").replace(">", "&gt;")
-
+
     # To make sure demo.launch() is present for Lite to render
     if "demo.launch()" not in safe_code:
         safe_code += "\n\ndemo.launch()"
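
The HTML template between this hunk and the next is unchanged and therefore elided by the diff. For orientation, a minimal Gradio-Lite srcdoc wrapper of the kind _create_gradio_lite_html builds looks roughly like this sketch — the CDN URLs are the ones Gradio-Lite's documentation uses, and the app's actual template may differ:

    def lite_iframe_sketch(safe_code: str) -> str:
        # Embed the escaped Python inside a <gradio-lite> tag, then wrap it in an iframe srcdoc.
        page = (
            "<html><head>"
            '<script type="module" src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>'
            '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />'
            "</head><body><gradio-lite>" + safe_code + "</gradio-lite></body></html>"
        )
        escaped = page.replace('"', "&quot;")
        return f'<iframe srcdoc="{escaped}" width="100%" height="600px"></iframe>'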

@@ -874,6 +978,8 @@ def _create_gradio_lite_html(python_code: str) -> str:
     </div>
     """
     return html_template
+
+
 # endregion
 
 # region MAIN LAYOUT DEFINITION

@@ -890,34 +996,34 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
 
     gr.Markdown("# 🤖 GitRepo Inspector (MCP Server) 🤖")
 
-    #
+    # GLOBAL CONFIGURATION
     with gr.Accordion("⚙️ Global Configuration (Configure tools here)", open=True):
         with gr.Row():
             global_github_token = gr.Textbox(
-                label="GitHub Token",
+                label="GitHub Token",
                 type="password",
-                placeholder="Optional for public repos, required for higher rate limits."
+                placeholder="Optional for public repos, required for higher rate limits.",
             )
             global_api_key = gr.Textbox(
-                label="LLM API Key",
+                label="LLM API Key",
                 type="password",
-                placeholder="Required for UI demo. (Matches selected Provider)"
+                placeholder="Required for UI demo. (Matches selected Provider)",
             )
         with gr.Row():
             global_provider = gr.Dropdown(
-                choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
-                value="gemini",
-                label="LLM Provider (For all tools)"
+                choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
+                value="gemini",
+                label="LLM Provider (For all tools)",
             )
             global_model = gr.Dropdown(
-                choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"],
-                value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0],
-                label="Main Model (Text/Chat)",
-                interactive=True
+                choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"],
+                value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0],
+                label="Main Model (Text/Chat)",
+                interactive=True,
             )
-        with gr.Row():
-            models_info_output = gr.JSON(label="Available Models Registry", visible=False)
-            get_models_btn = gr.Button("Refresh Models Info", visible=False)
+        with gr.Row():
+            models_info_output = gr.JSON(label="Available Models Registry", visible=False)
+            get_models_btn = gr.Button("Refresh Models Info", visible=False)
     # -----------------------------
 
     with gr.Tabs():

@@ -931,7 +1037,10 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
         with gr.TabItem("2. Issue Analyzer"):
             gr.Markdown("### 🔍 Analyze a GitHub Issue")
             with gr.Row():
-                issue_repo_url = gr.Textbox(
+                issue_repo_url = gr.Textbox(
+                    label="GitHub Repository URL",
+                    value="https://github.com/gradio-app/gradio",
+                )
                 issue_number_input = gr.Number(label="Issue Number", precision=0)
             issue_analyze_button = gr.Button("Analyze Issue 🕵️", variant="primary")
             issue_report_output = gr.HTML(label="Analysis Report")

@@ -953,28 +1062,36 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
         with gr.TabItem("5. Action Runner (Reply & Close)"):
             gr.Markdown("### ⚡ Action Runner")
             gr.Markdown("This tool allows the Agent (or you) to take action: post comments and close resolved issues.")
-
+
             with gr.Row():
                 action_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
                 action_issue_number = gr.Number(label="Issue Number", precision=0)
-
+
             action_comment = gr.Textbox(
-                label="Comment Body (Markdown supported)",
-                lines=5,
-                placeholder="Ex: This issue is resolved in PR #123. Closing now."
+                label="Comment Body (Markdown supported)",
+                lines=5,
+                placeholder="Ex: This issue is resolved in PR #123. Closing now.",
+            )
+
+            action_close_checkbox = gr.Checkbox(
+                label="Close this issue?",
+                value=False,
+                info="Check to verify the fix and close the issue on GitHub.",
             )
-
-
-
-            action_button = gr.Button("Execute Action 🚀", variant="stop")  # Variant stop (red) to signal caution
+
+            action_button = gr.Button("Execute Action 🚀", variant="stop")  # Variant stop (red) to signal caution
             action_output = gr.Markdown(label="Execution Result")
-
+
         with gr.TabItem("BONUS 1 - Theme Generator"):
             gr.Markdown("### 🖌️ Create a Theme with Natural Language")
             with gr.Row():
                 with gr.Column(scale=1, min_width=450):
                     with gr.Group():
-                        theme_prompt_input = gr.Textbox(
+                        theme_prompt_input = gr.Textbox(
+                            label="Describe your theme",
+                            placeholder="Ex: a dark theme with purple tones...",
+                            lines=4,
+                        )
                         theme_generate_button = gr.Button("Generate Theme ✨", variant="primary")
                         status_output = gr.Textbox(label="Status", interactive=False)
                     with gr.Tabs():

@@ -1000,7 +1117,11 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
             )
             with gr.Row():
                 with gr.Column(scale=1):
-                    sketch_input = gr.Image(
+                    sketch_input = gr.Image(
+                        type="pil",
+                        label="Upload the sketch",
+                        sources=["upload", "clipboard"],
+                    )
                     text_desc_input = gr.Textbox(label="Additional Description (Optional)")
                     sketch_generate_button = gr.Button("1. Generate Code", variant="secondary")
                 with gr.Column(scale=2):

@@ -1010,45 +1131,49 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
             with gr.TabItem("👀 Live Preview"):
                 sketch_preview_output = gr.HTML(label="Gradio-Lite Preview")
 
-    #
+    # EVENTS & LOGIC
 
     # Unified Model Updater: Updates BOTH the Text Model (Global) and Vision Model (Sketch) dropdowns
     get_models_btn.click(
         fn=get_available_models,
         inputs=None,
         outputs=[models_info_output],
-        api_name="get_available_models"
+        api_name="get_available_models",
     )
+
     def update_all_model_choices(provider):
         text_models = AVAILABLE_ISSUE_MODELS_BY_PROVIDER.get(provider, [])
         vision_models = AVAILABLE_SKETCH_MODELS_BY_PROVIDER.get(provider, [])
         return (
             gr.update(choices=text_models, value=text_models[0] if text_models else None),
-            gr.update(choices=vision_models, value=vision_models[0] if vision_models else None)
+            gr.update(choices=vision_models, value=vision_models[0] if vision_models else None),
         )
 
     global_provider.change(
         fn=update_all_model_choices,
         inputs=[global_provider],
-        outputs=[global_model, sketch_model_dropdown],
-        show_api=False
+        outputs=[global_model, sketch_model_dropdown],  # Update both dropdowns
+        show_api=False,
    )
 
     # Tool 1: Sync
     sync_button.click(
-        fn=sync_repository,
-        inputs=[sync_repo_url, global_github_token],
-        outputs=[sync_status_output],
-        api_name="sync_repository"
+        fn=sync_repository,
+        inputs=[sync_repo_url, global_github_token],
+        outputs=[sync_status_output],
+        api_name="sync_repository",
    )
 
     # Tool 2: Analyzer
     issue_analyze_button.click(
         fn=analyze_github_issue,
         inputs=[
-            issue_repo_url,
-
-
+            issue_repo_url,
+            issue_number_input,
+            global_provider,
+            global_model,
+            global_github_token,
+            global_api_key,
         ],
         outputs=[issue_report_output],
         api_name="analyze_github_issue",
@@ -1058,9 +1183,12 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
     dup_find_button.click(
         fn=find_duplicate_issues,
         inputs=[
-            dup_repo_url,
-
-
+            dup_repo_url,
+            dup_issue_number,
+            global_provider,
+            global_model,
+            global_github_token,
+            global_api_key,
         ],
         outputs=[dup_report_output],
         api_name="find_duplicate_issues",

@@ -1069,63 +1197,66 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
     # Tool 4: Prioritizer
     prio_run_button.click(
         fn=prioritize_open_issues,
-        inputs=[
-            prio_repo_url,
-            global_provider, global_model,
-            global_api_key
-        ],
+        inputs=[prio_repo_url, global_provider, global_model, global_api_key],
         outputs=[prio_report_output],
-        api_name="prioritize_open_issues"
+        api_name="prioritize_open_issues",
     )
 
     # Tool 5: Action Runner
     action_button.click(
         fn=reply_and_close_issue,
         inputs=[
-            action_repo_url,
-            action_issue_number,
-            action_comment,
-            action_close_checkbox,
-            global_github_token
+            action_repo_url,
+            action_issue_number,
+            action_comment,
+            action_close_checkbox,
+            global_github_token,
         ],
         outputs=[action_output],
-        api_name="reply_and_close_issue"
+        api_name="reply_and_close_issue",
     )
-
+
     # Bonus 1: Theme Gen
     theme_generate_button.click(
         fn=generate_theme,
-        inputs=[
-
-
-
+        inputs=[theme_prompt_input, global_provider, global_model, global_api_key],
+        outputs=[
+            generated_theme_state,
+            python_code_output,
+            status_output,
+            css_data_transport,
         ],
-        outputs=[generated_theme_state, python_code_output, status_output, css_data_transport],
     ).then(
         fn=None,
         inputs=[css_data_transport],
         outputs=None,
        js="(css) => { const action = () => apply_gradio_theme(css); if (typeof apply_gradio_theme === 'function') { action(); } else { document.addEventListener('theme-helper-injected', action, { once: true }); } }",
     )
-    upload_button.click(
+    upload_button.click(
+        fn=handle_upload,
+        inputs=[generated_theme_state, hub_theme_name, hub_hf_token],
+        outputs=[upload_status],
+    )
 
     # Bonus 2: Sketch (Uses Global Provider & Key, but LOCAL Vision Model)
     generate_event = sketch_generate_button.click(
         fn=generate_ui_from_sketch,
         inputs=[
-            sketch_input,
-
-
+            sketch_input,
+            text_desc_input,
+            global_provider,
+            sketch_model_dropdown,
+            global_api_key,
         ],
         outputs=[sketch_code_output],
-    )
+    )
     generate_event.success(
         fn=_create_gradio_lite_html,
         inputs=[sketch_code_output],
         outputs=[sketch_preview_output],
-        show_api=False
+        show_api=False,
     )
-
+
     def add_message_to_chat(history, message):
         """
         A simple function to add a user message to the chat history

@@ -1142,15 +1273,22 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
             time.sleep(0.5)
         history.append({"role": "assistant", "content": "Thank you for your message!"})
         return history
-
+
     # Chat Example Logic
-    add_msg_btn_comp.click(
+    add_msg_btn_comp.click(
 
     # Init
-    app.load(fn=initialize_database, inputs=None, outputs=None, show_api=False).then(
+    app.load(fn=initialize_database, inputs=None, outputs=None, show_api=False).then(
 
# endregion
if __name__ == "__main__":
    _check_env()
    app.allowed_paths = ["."]
-    app.launch(mcp_server=True, server_port=7860)
+    app.launch(mcp_server=True, server_port=7860)
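
Every api_name registered above is exposed as a named endpoint (and, with mcp_server=True, as an MCP tool). A hedged sketch of calling one endpoint from Python with gradio_client — the URL is a placeholder for wherever the app is running, and the positional arguments mirror the [sync_repo_url, global_github_token] inputs wired above:

    from gradio_client import Client

    client = Client("http://localhost:7860")  # placeholder URL
    status = client.predict(
        "https://github.com/gradio-app/gradio",  # repo_url
        "",                                       # github_token (optional)
        api_name="/sync_repository",
    )
    print(status)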
|
|
|
|
| 38 |
return {
|
| 39 |
"text_models": AVAILABLE_ISSUE_MODELS_BY_PROVIDER,
|
| 40 |
"vision_models": AVAILABLE_SKETCH_MODELS_BY_PROVIDER,
|
| 41 |
+
"providers": list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
|
| 42 |
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None, desc: str = ""):
|
| 46 |
"""Helper function to fetch all pages from a paginated GitHub API endpoint."""
|
| 47 |
results = []
|
| 48 |
page_num = 1
|
| 49 |
+
base_url = url.split("&page=")[0]
|
| 50 |
|
| 51 |
while True:
|
| 52 |
paginated_url = f"{base_url}&page={page_num}"
|
|
|
|
| 68 |
return results
|
| 69 |
|
| 70 |
|
| 71 |
+
def sync_repository(
|
| 72 |
+
repo_url: str,
|
| 73 |
+
github_token: str | None = None,
|
| 74 |
+
progress=gr.Progress(track_tqdm=True),
|
| 75 |
+
):
|
| 76 |
"""
|
| 77 |
Performs a lightweight sync of a GitHub repository with the local database.
|
| 78 |
+
|
| 79 |
Args:
|
| 80 |
repo_url (str): The full URL of the GitHub repository (e.g., 'https://github.com/gradio-app/gradio').
|
| 81 |
github_token (str, optional): A GitHub Personal Access Token. Optional. If not provided, the tool will work but may hit public API rate limits.
|
| 82 |
+
|
| 83 |
Returns:
|
| 84 |
str: A status message indicating completion.
|
| 85 |
"""
|
|
|
|
| 142 |
VALUES (%s, %s, %s, %s, %s)
|
| 143 |
ON CONFLICT (tag_name) DO NOTHING;
|
| 144 |
""",
|
| 145 |
+
(
|
| 146 |
+
repo_slug,
|
| 147 |
+
release["tag_name"],
|
| 148 |
+
release["name"],
|
| 149 |
+
release.get("body"),
|
| 150 |
+
release["published_at"],
|
| 151 |
+
),
|
| 152 |
)
|
| 153 |
conn.commit()
|
| 154 |
conn.close()
|
|
|
|
| 156 |
|
| 157 |
except Exception as e:
|
| 158 |
import traceback
|
| 159 |
+
|
| 160 |
traceback.print_exc()
|
| 161 |
+
|
| 162 |
+
error_msg = str(e)
|
| 163 |
if "429" in error_msg or "Rate" in error_msg:
|
| 164 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 165 |
else:
|
| 166 |
detailed_msg = f"System Error: {error_msg}"
|
| 167 |
+
|
| 168 |
raise gr.Error(detailed_msg)
|
| 169 |
|
| 170 |
|
|
|
|
| 176 |
github_token: str | None = None,
|
| 177 |
llm_api_key: str | None = None,
|
| 178 |
request: gr.Request = None,
|
| 179 |
+
progress=gr.Progress(track_tqdm=True),
|
| 180 |
):
|
| 181 |
"""
|
| 182 |
Analyzes a single GitHub issue to determine its resolution status by checking PRs and Releases.
|
|
|
|
| 188 |
llm_model (str, optional): The specific model name. Defaults to 'gemini-2.0-flash'.
|
| 189 |
github_token (str, optional): GitHub Token. Optional. Recommended for higher rate limits.
|
| 190 |
llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided (empty string), the server will use its internal environment keys to perform the analysis.
|
| 191 |
+
|
| 192 |
Returns:
|
| 193 |
str: An HTML-formatted analysis report.
|
| 194 |
"""
|
| 195 |
+
_validate_api_keys(llm_api_key, request)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 196 |
|
| 197 |
+
if not repo_url or not issue_number:
|
| 198 |
+
return "## Error\nPlease provide both a repository URL and an issue number."
|
| 199 |
+
try:
|
| 200 |
repo_slug = "/".join(repo_url.strip().replace("https://github.com/", "").split("/")[:2])
|
| 201 |
headers = {"Authorization": f"token {github_token}"} if github_token else {}
|
| 202 |
conn = connect()
|
|
|
|
| 225 |
pr_urls_to_fetch.append(f"https://api.github.com/repos/{repo_slug}/pulls/{pr_num}")
|
| 226 |
|
| 227 |
for i, url in enumerate(pr_urls_to_fetch):
|
| 228 |
+
progress(
|
| 229 |
+
0.4 + (0.2 * (i / len(pr_urls_to_fetch))) if pr_urls_to_fetch else 0.4,
|
| 230 |
+
desc=f"Fetching details for PR {i + 1}/{len(pr_urls_to_fetch)}...",
|
| 231 |
+
)
|
| 232 |
pr_res = requests.get(url, headers=headers)
|
| 233 |
if pr_res.status_code == 200:
|
| 234 |
linked_prs_details.append(pr_res.json())
|
| 235 |
|
| 236 |
# 2. GET RELEASE DATA FROM CACHE
|
| 237 |
progress(0.6, desc="Querying local release cache...")
|
| 238 |
+
cursor.execute(
|
| 239 |
+
"SELECT * FROM releases WHERE repo = %s ORDER BY published_at DESC LIMIT 50",
|
| 240 |
+
(repo_slug,),
|
| 241 |
+
)
|
| 242 |
releases = cursor.fetchall()
|
| 243 |
conn.close()
|
| 244 |
|
|
|
|
| 254 |
llm_model,
|
| 255 |
messages=[{"role": "user", "content": summarizer_prompt}],
|
| 256 |
temperature=0.0,
|
| 257 |
+
api_key=llm_api_key,
|
| 258 |
)
|
| 259 |
except Exception:
|
| 260 |
additional_problems_summary = "Could not summarize comments due to an error."
|
|
|
|
| 275 |
|
| 276 |
release_notes_summary = (
|
| 277 |
"\n\n".join(
|
| 278 |
+
[
|
| 279 |
+
f"- **Release {r['tag_name']} ({str(r['published_at'])[:10]}):**\n{str(r['body'])[:500] if r['body'] else 'No description.'}..."
|
| 280 |
+
for r in releases
|
| 281 |
+
]
|
| 282 |
)
|
| 283 |
if releases
|
| 284 |
else "No releases found in local cache."
|
|
|
|
| 290 |
"issue_state": issue_data["state"],
|
| 291 |
"issue_body": issue_data["body"] or "No description provided.",
|
| 292 |
"additional_problems_summary": additional_problems_summary.strip(),
|
| 293 |
+
"issue_labels": (json.dumps([label["name"] for label in issue_data["labels"]]) if issue_data["labels"] else "None"),
|
| 294 |
"pull_requests_summary": pull_requests_summary,
|
| 295 |
"release_notes_summary": release_notes_summary,
|
| 296 |
}
|
|
|
|
| 306 |
messages=messages,
|
| 307 |
temperature=0.1,
|
| 308 |
max_tokens=2048,
|
| 309 |
+
api_key=llm_api_key,
|
| 310 |
)
|
| 311 |
+
|
| 312 |
report_start_marker = f"## Analysis of Issue #{issue_number}"
|
| 313 |
report_start_index = raw_output.find(report_start_marker)
|
| 314 |
|
|
|
|
| 336 |
|
| 337 |
except Exception as e:
|
| 338 |
import traceback
|
| 339 |
+
|
| 340 |
traceback.print_exc()
|
| 341 |
+
|
| 342 |
error_msg = str(e)
|
| 343 |
if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
|
| 344 |
detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
|
| 345 |
elif "429" in error_msg or "Rate" in error_msg:
|
| 346 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 347 |
else:
|
| 348 |
detailed_msg = f"System Error: {error_msg}"
|
| 349 |
+
|
| 350 |
raise gr.Error(detailed_msg)
|
| 351 |
|
| 352 |
|
|
|
|
| 358 |
github_token: str | None = None,
|
| 359 |
llm_api_key: str | None = None,
|
| 360 |
request: gr.Request = None,
|
| 361 |
+
progress=gr.Progress(),
|
| 362 |
):
|
| 363 |
"""
|
| 364 |
Finds potential duplicate issues for a given issue using mentions and keyword search.
|
| 365 |
+
|
| 366 |
Args:
|
| 367 |
repo_url (str): The full URL of the GitHub repository.
|
| 368 |
issue_number (str): The main issue number to check.
|
|
|
|
| 370 |
llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
|
| 371 |
github_token (str, optional): GitHub Token. Optional.
|
| 372 |
llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
|
| 373 |
+
|
| 374 |
Returns:
|
| 375 |
str: A Markdown list of potential duplicates.
|
| 376 |
"""
|
| 377 |
_validate_api_keys(llm_api_key, request)
|
| 378 |
+
|
| 379 |
if not repo_url or not issue_number:
|
| 380 |
return "Please provide the repository and the main issue number."
|
| 381 |
|
|
|
|
| 387 |
cursor = conn.cursor()
|
| 388 |
|
| 389 |
progress(0, desc="Fetching main issue from cache...")
|
| 390 |
+
cursor.execute(
|
| 391 |
+
"SELECT * FROM items WHERE repo = %s AND number = %s",
|
| 392 |
+
(repo_slug, issue_number),
|
| 393 |
+
)
|
| 394 |
main_issue = cursor.fetchone()
|
| 395 |
+
|
| 396 |
if not main_issue:
|
| 397 |
conn.close()
|
| 398 |
return "Main issue not found in cache. Please synchronize the repository first."
|
|
|
|
| 450 |
llm_model,
|
| 451 |
messages=messages,
|
| 452 |
temperature=0.0,
|
| 453 |
+
api_key=llm_api_key,
|
| 454 |
)
|
| 455 |
return analysis
|
| 456 |
|
| 457 |
except Exception as e:
|
| 458 |
import traceback
|
| 459 |
+
|
| 460 |
traceback.print_exc()
|
| 461 |
+
|
| 462 |
error_msg = str(e)
|
| 463 |
if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
|
| 464 |
detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
|
| 465 |
elif "429" in error_msg or "Rate" in error_msg:
|
| 466 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 467 |
else:
|
| 468 |
detailed_msg = f"System Error: {error_msg}"
|
| 469 |
+
|
| 470 |
raise gr.Error(detailed_msg)
|
| 471 |
|
| 472 |
+
|
| 473 |
def prioritize_open_issues(
|
| 474 |
repo_url: str,
|
| 475 |
llm_provider: str = "gemini",
|
| 476 |
llm_model: str = "gemini-2.0-flash",
|
| 477 |
llm_api_key: str | None = None,
|
| 478 |
request: gr.Request = None,
|
| 479 |
+
progress=gr.Progress(track_tqdm=True),
|
| 480 |
):
|
| 481 |
"""
|
| 482 |
Analyzes open issues from the cache to create a prioritized backlog.
|
| 483 |
+
|
| 484 |
Args:
|
| 485 |
repo_url (str): The URL of the GitHub repository.
|
| 486 |
llm_provider (str, optional): The LLM provider. Defaults to 'gemini'.
|
| 487 |
llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
|
| 488 |
llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
|
| 489 |
+
|
| 490 |
Returns:
|
| 491 |
str: A Markdown priority list.
|
| 492 |
"""
|
| 493 |
_validate_api_keys(llm_api_key, request)
|
| 494 |
+
|
| 495 |
if not repo_url:
|
| 496 |
return "Please provide the repository URL."
|
| 497 |
|
|
|
|
| 502 |
|
| 503 |
progress(0, desc="Fetching open issues from cache...")
|
| 504 |
cursor.execute(
|
| 505 |
+
"SELECT * FROM items WHERE repo = %s AND is_pr = FALSE AND state = 'open' ORDER BY comments DESC, reactions DESC LIMIT 50",
|
| 506 |
+
(repo_slug,),
|
| 507 |
)
|
| 508 |
open_issues = cursor.fetchall()
|
| 509 |
conn.close()
|
|
|
|
| 532 |
messages=messages,
|
| 533 |
temperature=0.1,
|
| 534 |
max_tokens=4096,
|
| 535 |
+
api_key=llm_api_key,
|
| 536 |
)
|
| 537 |
return analysis
|
| 538 |
|
| 539 |
except Exception as e:
|
| 540 |
import traceback
|
| 541 |
+
|
| 542 |
traceback.print_exc()
|
| 543 |
+
|
| 544 |
error_msg = str(e)
|
| 545 |
if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
|
| 546 |
detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
|
| 547 |
elif "429" in error_msg or "Rate" in error_msg:
|
| 548 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 549 |
else:
|
| 550 |
detailed_msg = f"System Error: {error_msg}"
|
| 551 |
+
|
| 552 |
raise gr.Error(detailed_msg)
|
| 553 |
|
| 554 |
+
|
| 555 |
def reply_and_close_issue(
|
| 556 |
repo_url: str,
|
| 557 |
issue_number: str,
|
| 558 |
comment_body: str,
|
| 559 |
close_issue: bool = False,
|
| 560 |
github_token: str | None = None,
|
| 561 |
+
request: gr.Request = None,
|
| 562 |
):
|
| 563 |
"""
|
| 564 |
Posts a comment on a GitHub issue and optionally closes it.
|
| 565 |
Crucial for the 'Actionable AI' capability.
|
| 566 |
+
|
| 567 |
Args:
|
| 568 |
repo_url (str): Full repository URL.
|
| 569 |
issue_number (str): The issue number.
|
|
|
|
| 580 |
repo_slug = f"{owner}/{repo}"
|
| 581 |
headers = {
|
| 582 |
"Authorization": f"token {github_token}",
|
| 583 |
+
"Accept": "application/vnd.github.v3+json",
|
| 584 |
}
|
| 585 |
|
| 586 |
# 1. Post the Comment
|
|
|
|
| 596 |
if close_issue:
|
| 597 |
issue_url = f"https://api.github.com/repos/{repo_slug}/issues/{issue_number}"
|
| 598 |
# state_reason can be 'completed' or 'not_planned'
|
| 599 |
+
close_resp = requests.patch(
|
| 600 |
+
issue_url,
|
| 601 |
+
headers=headers,
|
| 602 |
+
json={"state": "closed", "state_reason": "completed"},
|
| 603 |
+
)
|
| 604 |
close_resp.raise_for_status()
|
| 605 |
action_log += f"\n🔒 Issue #{issue_number} has been CLOSED."
|
| 606 |
+
|
| 607 |
return f"## Success\n{action_log}"
|
| 608 |
|
| 609 |
except requests.exceptions.HTTPError as e:
|
|
|
|
| 614 |
raise gr.Error(f"GitHub API Error: {e}")
|
| 615 |
except Exception as e:
|
| 616 |
import traceback
|
| 617 |
+
|
| 618 |
traceback.print_exc()
|
| 619 |
raise gr.Error(f"System Error: {str(e)}")
|
| 620 |
|
| 621 |
+
|
| 622 |
def generate_theme(
|
| 623 |
prompt: str,
|
| 624 |
llm_provider: str = "gemini",
|
| 625 |
llm_model: str = "gemini-2.0-flash",
|
| 626 |
llm_api_key: str | None = None,
|
| 627 |
+
request: gr.Request = None,
|
| 628 |
):
|
| 629 |
"""
|
| 630 |
Generates a Gradio theme based on a text prompt.
|
| 631 |
+
|
| 632 |
Args:
|
| 633 |
prompt (str): Description of the desired theme.
|
| 634 |
llm_provider (str, optional): LLM provider. Defaults to 'gemini'.
|
|
|
|
| 636 |
llm_api_key (str, optional): API Key. **OPTIONAL**. If empty, uses server keys.
|
| 637 |
"""
|
| 638 |
_validate_api_keys(llm_api_key, request)
|
| 639 |
+
|
| 640 |
try:
|
| 641 |
+
messages = [
|
| 642 |
+
{"role": "system", "content": SYSTEM_PROMPT_THEME_GENERATOR_EN},
|
| 643 |
+
{"role": "user", "content": prompt},
|
| 644 |
+
]
|
| 645 |
+
llm_output = LLMFactory.call(
|
| 646 |
+
llm_provider=llm_provider,
|
| 647 |
+
llm_model=llm_model,
|
| 648 |
+
messages=messages,
|
| 649 |
+
temperature=0.1,
|
| 650 |
+
max_tokens=2048,
|
| 651 |
+
api_key=llm_api_key,
|
| 652 |
+
)
|
| 653 |
|
| 654 |
if isinstance(llm_output, str) and llm_output.startswith("Error:"):
|
| 655 |
raise Exception(llm_output)
|
|
|
|
| 674 |
|
| 675 |
if "font" in constructor_kwargs:
|
| 676 |
fonts = constructor_kwargs.get("font", [])
|
| 677 |
+
constructor_kwargs["font"] = [(gr.themes.GoogleFont(name) if name not in ["ui-sans-serif", "system-ui", "sans-serif"] else name) for name in fonts]
|
| 678 |
if "font_mono" in constructor_kwargs:
|
| 679 |
mono_fonts = constructor_kwargs.get("font_mono", [])
|
| 680 |
constructor_kwargs["font_mono"] = [
|
| 681 |
+
(gr.themes.GoogleFont(name) if name not in ["ui-monospace", "Consolas", "monospace"] else name) for name in mono_fonts
|
| 682 |
]
|
| 683 |
|
| 684 |
theme_object = gr.themes.Default(**constructor_kwargs)
|
|
|
|
| 689 |
css_str = theme_object._get_theme_css()
|
| 690 |
|
| 691 |
def format_arg(val):
|
| 692 |
+
if isinstance(val, str):
|
| 693 |
+
return f"'{val}'"
|
| 694 |
if isinstance(val, list):
|
| 695 |
+
font_list = [(f"gr.themes.GoogleFont('{f.name}')" if isinstance(f, gr.themes.GoogleFont) else f"'{f}'") for f in val]
|
| 696 |
return f"[{', '.join(font_list)}]"
|
| 697 |
return str(val)
|
| 698 |
|
|
|
|
| 705 |
|
| 706 |
except Exception as e:
|
| 707 |
import traceback
|
| 708 |
+
|
| 709 |
traceback.print_exc()
|
| 710 |
+
|
| 711 |
error_msg = str(e)
|
| 712 |
if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
|
| 713 |
detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
|
| 714 |
elif "429" in error_msg or "Rate" in error_msg:
|
| 715 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 716 |
else:
|
| 717 |
detailed_msg = f"System Error: {error_msg}"
|
| 718 |
+
|
| 719 |
raise gr.Error(detailed_msg)
|
| 720 |
|
| 721 |
|
|
|
|
| 742 |
llm_provider: str = "sambanova",
|
| 743 |
llm_model: str = "Llama-4-Maverick-17B-128E-Instruct",
|
| 744 |
llm_api_key: str | None = None,
|
| 745 |
+
request: gr.Request = None,
|
| 746 |
):
|
| 747 |
"""
|
| 748 |
Generates Python code for a Gradio UI from a sketch image.
|
| 749 |
+
|
| 750 |
Args:
|
| 751 |
sketch_image (Image): The input sketch image.
|
| 752 |
text_description (str): Optional text description.
|
|
|
|
| 762 |
try:
|
| 763 |
messages = [
|
| 764 |
{"role": "system", "content": SYSTEM_PROMPT_SKETCH_EN},
|
| 765 |
+
{
|
| 766 |
+
"role": "user",
|
| 767 |
+
"content": f"Additional text description: {text_description if text_description else 'None'}",
|
| 768 |
+
},
|
| 769 |
]
|
| 770 |
|
| 771 |
+
llm_output = LLMFactory.call(
|
| 772 |
+
llm_provider=llm_provider,
|
| 773 |
+
llm_model=llm_model,
|
| 774 |
+
messages=messages,
|
| 775 |
+
image=sketch_image,
|
| 776 |
+
temperature=0.1,
|
| 777 |
+
max_tokens=8192,
|
| 778 |
+
api_key=llm_api_key,
|
| 779 |
+
)
|
| 780 |
|
| 781 |
if isinstance(llm_output, str) and llm_output.startswith("Error:"):
|
| 782 |
raise Exception(llm_output)
|
|
|
|
| 785 |
return code.strip()
|
| 786 |
except Exception as e:
|
| 787 |
import traceback
|
| 788 |
+
|
| 789 |
traceback.print_exc()
|
| 790 |
+
|
| 791 |
error_msg = str(e)
|
| 792 |
if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
|
| 793 |
detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
|
| 794 |
elif "429" in error_msg or "Rate" in error_msg:
|
| 795 |
+
detailed_msg = "Rate Limit Error: The AI provider is busy. Please wait a moment."
|
| 796 |
else:
|
| 797 |
detailed_msg = f"System Error: {error_msg}"
|
| 798 |
+
|
| 799 |
raise gr.Error(detailed_msg)
|
| 800 |
|
| 801 |
|
|
|
|
| 804 |
|
| 805 |
# region HELPER FUNCTIONS
|
| 806 |
def _validate_api_keys(llm_api_key, request):
|
| 807 |
+
USE_SERVER_KEYS = os.getenv("USE_SERVER_KEYS", "false").lower() in (
|
| 808 |
+
"True",
|
| 809 |
+
"true",
|
| 810 |
+
"1",
|
| 811 |
+
"yes",
|
| 812 |
+
)
|
| 813 |
if not USE_SERVER_KEYS and request and request.headers.get("referer"):
|
| 814 |
if not llm_api_key or not llm_api_key.strip():
|
| 815 |
raise gr.Error("⚠️ LLM API Key Required! Please enter your own API Key to use this tool in the demo UI.")
|
| 816 |
+
|
| 817 |
+
|
| 818 |
def _check_env():
|
| 819 |
if not os.getenv("GOOGLE_API_KEY"):
|
| 820 |
print("WARNING: The GOOGLE_API_KEY environment variable was not set.")
|
| 821 |
if not os.getenv("SAMBANOVA_API_KEY"):
|
| 822 |
print("WARNING: The SAMBANOVA_API_KEY environment variable was not set.")
|
| 823 |
|
| 824 |
+
|
| 825 |
def _inject_theme_helper_js():
|
| 826 |
js_code = """
|
| 827 |
function apply_gradio_theme(css) {
|
|
|
|
| 837 |
"""
|
| 838 |
return {"js": js_code}
|
| 839 |
|
| 840 |
+
|
| 841 |
def _add_interactive_inputs():
|
| 842 |
with gr.Row():
|
| 843 |
with gr.Column(variant="panel", scale=1):
|
| 844 |
gr.Markdown("### Interactive Controls")
|
| 845 |
radio = gr.Radio(["A", "B", "C"], label="Radio")
|
| 846 |
drop = gr.Dropdown(["Option 1", "Option 2"], show_label=False)
|
| 847 |
+
drop_2 = gr.Dropdown(
|
| 848 |
+
["A", "B", "C"],
|
| 849 |
+
multiselect=True,
|
| 850 |
+
value=["A"],
|
| 851 |
+
label="Multiple Dropdown",
|
| 852 |
+
)
|
| 853 |
check = gr.Checkbox(label="Checkbox")
|
| 854 |
with gr.Column(variant="panel", scale=2):
|
| 855 |
+
gr.Image(
|
| 856 |
+
"https://gradio-static-files.s3.us-west-2.amazonaws.com/header-image.jpg",
|
| 857 |
+
label="Image",
|
| 858 |
+
height=320,
|
| 859 |
+
)
|
| 860 |
with gr.Row():
|
| 861 |
gr.Button("Primary", variant="primary")
|
| 862 |
gr.Button("Secondary")
|
|
|
|
| 866 |
gr.Button("Stop", variant="stop", size="sm")
|
| 867 |
return radio, drop, drop_2, check
|
| 868 |
|
| 869 |
+
|
| 870 |
def _add_data_display_components():
|
| 871 |
with gr.Row():
|
| 872 |
gr.Dataframe(value=[[1, 2], [3, 4]], label="Dataframe"), gr.JSON(value={"a": 1}), gr.Label(value={"cat": 0.7}), gr.File()
|
| 873 |
|
| 874 |
+
|
| 875 |
def _add_media_components():
|
| 876 |
with gr.Row():
|
| 877 |
(
|
| 878 |
gr.ColorPicker(label="Color Picker"),
|
| 879 |
gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4"),
|
| 880 |
+
gr.Gallery(
|
| 881 |
+
[
|
| 882 |
+
(
|
| 883 |
+
"https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg",
|
| 884 |
+
"lion",
|
| 885 |
+
)
|
| 886 |
+
],
|
| 887 |
+
height="200px",
|
| 888 |
+
),
|
| 889 |
)
|
| 890 |
|
| 891 |
+
|
| 892 |
def _add_chatbot_component():
|
| 893 |
with gr.Row():
|
| 894 |
with gr.Column(scale=2):
|
|
|
|
| 900 |
gr.Slider(label="Temperature"), gr.Checkbox(label="Streaming")
|
| 901 |
return chatbot, msg_input, add_msg_btn
|
| 902 |
|
| 903 |
+
|
| 904 |
def _create_example_app():
|
| 905 |
with gr.Column(scale=3, elem_id="app"):
|
| 906 |
with gr.Tabs():
|
| 907 |
with gr.TabItem("Common Inputs"):
|
| 908 |
+
gr.Textbox(
|
| 909 |
+
label="Text Box",
|
| 910 |
+
info="A standard text field.",
|
| 911 |
+
placeholder="Write something...",
|
| 912 |
+
)
|
| 913 |
+
gr.Interface(
|
| 914 |
+
lambda x: x,
|
| 915 |
+
"number",
|
| 916 |
+
"textbox",
|
| 917 |
+
title="Interface Component (Compact)",
|
| 918 |
+
show_api=False,
|
| 919 |
+
)
|
| 920 |
with gr.Row():
|
| 921 |
gr.Slider(label="Slider 1")
|
| 922 |
gr.Slider(label="Slider 2")
|
| 923 |
gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
|
| 924 |
radio, drop, drop_2, check = _add_interactive_inputs()
|
| 925 |
gr.Examples(
|
| 926 |
+
examples=[
|
| 927 |
+
["A", "Option 1", ["B"], True],
|
| 928 |
+
["B", "Option 2", ["A", "C"], False],
|
| 929 |
+
],
|
| 930 |
+
inputs=[radio, drop, drop_2, check],
|
| 931 |
+
label="Input Examples",
|
| 932 |
)
|
| 933 |
with gr.TabItem("Data and Media"):
|
| 934 |
_add_data_display_components()
|
|
|
|
| 937 |
chatbot, msg_input, add_msg_btn = _add_chatbot_component()
|
| 938 |
return chatbot, msg_input, add_msg_btn
|
| 939 |
|
| 940 |
+
|
| 941 |
def _create_gradio_lite_html(python_code: str) -> str:
|
| 942 |
"""
|
| 943 |
Wraps the Python code in a Gradio-Lite HTML structure with an iframe.
|
|
|
|
| 945 |
"""
|
| 946 |
# Escape any existing script tags to prevent injection issues inside srcdoc
|
| 947 |
safe_code = python_code.replace("<", "<").replace(">", ">")
|
| 948 |
+
|
| 949 |
# To make sure demo.launch() is present for Lite to render
|
| 950 |
if "demo.launch()" not in safe_code:
|
| 951 |
safe_code += "\n\ndemo.launch()"
|
|
|
|
| 978 |
</div>
|
| 979 |
"""
|
| 980 |
return html_template
|
| 981 |
+
|
| 982 |
+
|
| 983 |
# endregion
|
| 984 |
|
| 985 |
# region MAIN LAYOUT DEFINITION
|
|
|
|
| 996 |
|
| 997 |
gr.Markdown("# 🤖 GitRepo Inspector (MCP Server) 🤖")
|
| 998 |
|
| 999 |
+
# GLOBAL CONFIGURATION
|
| 1000 |
with gr.Accordion("⚙️ Global Configuration (Configure tools here)", open=True):
|
| 1001 |
with gr.Row():
|
| 1002 |
global_github_token = gr.Textbox(
|
| 1003 |
+
label="GitHub Token",
|
| 1004 |
type="password",
|
| 1005 |
+
placeholder="Optional for public repos, required for higher rate limits.",
|
| 1006 |
)
|
| 1007 |
global_api_key = gr.Textbox(
|
| 1008 |
+
label="LLM API Key",
|
| 1009 |
type="password",
|
| 1010 |
+
placeholder="Required for UI demo. (Matches selected Provider)",
|
| 1011 |
)
|
| 1012 |
with gr.Row():
|
| 1013 |
global_provider = gr.Dropdown(
|
| 1014 |
+
choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
|
| 1015 |
+
value="gemini",
|
| 1016 |
+
label="LLM Provider (For all tools)",
|
| 1017 |
)
|
| 1018 |
global_model = gr.Dropdown(
|
| 1019 |
+
choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"],
|
| 1020 |
+
value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0],
|
| 1021 |
+
label="Main Model (Text/Chat)",
|
| 1022 |
+
interactive=True,
|
| 1023 |
)
|
| 1024 |
+
with gr.Row():
|
| 1025 |
+
models_info_output = gr.JSON(label="Available Models Registry", visible=False)
|
| 1026 |
+
get_models_btn = gr.Button("Refresh Models Info", visible=False)
|
| 1027 |
# -----------------------------
|
| 1028 |
|
| 1029 |
with gr.Tabs():
|
|
|
|
| 1037 |
with gr.TabItem("2. Issue Analyzer"):
|
| 1038 |
gr.Markdown("### 🔍 Analyze a GitHub Issue")
|
| 1039 |
with gr.Row():
|
| 1040 |
+
issue_repo_url = gr.Textbox(
|
| 1041 |
+
label="GitHub Repository URL",
|
| 1042 |
+
value="https://github.com/gradio-app/gradio",
|
| 1043 |
+
)
|
| 1044 |
issue_number_input = gr.Number(label="Issue Number", precision=0)
|
| 1045 |
issue_analyze_button = gr.Button("Analyze Issue 🕵️", variant="primary")
|
| 1046 |
issue_report_output = gr.HTML(label="Analysis Report")
|
|
|
|
| 1062 |
with gr.TabItem("5. Action Runner (Reply & Close)"):
|
| 1063 |
gr.Markdown("### ⚡ Action Runner")
|
| 1064 |
gr.Markdown("This tool allows the Agent (or you) to take action: post comments and close resolved issues.")
|
| 1065 |
+
|
| 1066 |
with gr.Row():
|
| 1067 |
action_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
|
| 1068 |
action_issue_number = gr.Number(label="Issue Number", precision=0)
|
| 1069 |
+
|
| 1070 |
action_comment = gr.Textbox(
|
| 1071 |
+
label="Comment Body (Markdown supported)",
|
| 1072 |
+
lines=5,
|
| 1073 |
+
placeholder="Ex: This issue is resolved in PR #123. Closing now.",
|
| 1074 |
+
)
|
| 1075 |
+
|
| 1076 |
+
action_close_checkbox = gr.Checkbox(
|
| 1077 |
+
label="Close this issue?",
|
| 1078 |
+
value=False,
|
| 1079 |
+
info="Check to verify the fix and close the issue on GitHub.",
|
| 1080 |
)
|
| 1081 |
+
|
| 1082 |
+
action_button = gr.Button("Execute Action 🚀", variant="stop") # Variant stop (vermelho) para indicar cuidado
|
|
|
|
|
|
|
| 1083 |
action_output = gr.Markdown(label="Execution Result")
|
| 1084 |
+
|
| 1085 |
with gr.TabItem("BONUS 1 - Theme Generator"):
|
| 1086 |
gr.Markdown("### 🖌️ Create a Theme with Natural Language")
|
| 1087 |
with gr.Row():
|
| 1088 |
with gr.Column(scale=1, min_width=450):
|
| 1089 |
with gr.Group():
|
| 1090 |
+
theme_prompt_input = gr.Textbox(
|
| 1091 |
+
label="Describe your theme",
|
| 1092 |
+
placeholder="Ex: a dark theme with purple tones...",
|
| 1093 |
+
lines=4,
|
| 1094 |
+
)
|
| 1095 |
theme_generate_button = gr.Button("Generate Theme ✨", variant="primary")
|
| 1096 |
status_output = gr.Textbox(label="Status", interactive=False)
|
| 1097 |
with gr.Tabs():
|
|
|
|
| 1117 |
)
|
| 1118 |
with gr.Row():
|
| 1119 |
with gr.Column(scale=1):
|
| 1120 |
+
sketch_input = gr.Image(
|
| 1121 |
+
type="pil",
|
| 1122 |
+
label="Upload the sketch",
|
| 1123 |
+
sources=["upload", "clipboard"],
|
| 1124 |
+
)
|
| 1125 |
text_desc_input = gr.Textbox(label="Additional Description (Optional)")
|
| 1126 |
sketch_generate_button = gr.Button("1. Generate Code", variant="secondary")
|
| 1127 |
with gr.Column(scale=2):
|
|
|
|
                    # ...
                    with gr.TabItem("👀 Live Preview"):
                        sketch_preview_output = gr.HTML(label="Gradio-Lite Preview")

    # EVENTS & LOGIC

    # Unified Model Updater: Updates BOTH the Text Model (Global) and Vision Model (Sketch) dropdowns
    get_models_btn.click(
        fn=get_available_models,
        inputs=None,
        outputs=[models_info_output],
        api_name="get_available_models",
    )

    def update_all_model_choices(provider):
        text_models = AVAILABLE_ISSUE_MODELS_BY_PROVIDER.get(provider, [])
        vision_models = AVAILABLE_SKETCH_MODELS_BY_PROVIDER.get(provider, [])
        return (
            gr.update(choices=text_models, value=text_models[0] if text_models else None),
            gr.update(choices=vision_models, value=vision_models[0] if vision_models else None),
        )

    global_provider.change(
        fn=update_all_model_choices,
        inputs=[global_provider],
        outputs=[global_model, sketch_model_dropdown],  # Update both dropdowns
        show_api=False,
    )
    # Tool 1: Sync
    sync_button.click(
        fn=sync_repository,
        inputs=[sync_repo_url, global_github_token],
        outputs=[sync_status_output],
        api_name="sync_repository",
    )

    # Tool 2: Analyzer
    issue_analyze_button.click(
        fn=analyze_github_issue,
        inputs=[
            issue_repo_url,
            issue_number_input,
            global_provider,
            global_model,
            global_github_token,
            global_api_key,
        ],
        outputs=[issue_report_output],
        api_name="analyze_github_issue",
    # ...
    dup_find_button.click(
        fn=find_duplicate_issues,
        inputs=[
            dup_repo_url,
            dup_issue_number,
            global_provider,
            global_model,
            global_github_token,
            global_api_key,
        ],
        outputs=[dup_report_output],
        api_name="find_duplicate_issues",
    # ...

    # Tool 4: Prioritizer
    prio_run_button.click(
        fn=prioritize_open_issues,
        inputs=[prio_repo_url, global_provider, global_model, global_api_key],
        outputs=[prio_report_output],
        api_name="prioritize_open_issues",
    )

    # Tool 5: Action Runner
    action_button.click(
        fn=reply_and_close_issue,
        inputs=[
            action_repo_url,
            action_issue_number,
            action_comment,
            action_close_checkbox,
            global_github_token,
        ],
        outputs=[action_output],
        api_name="reply_and_close_issue",
    )
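Because each handler above is registered with an explicit `api_name`, the running app exposes these tools as callable HTTP endpoints. A minimal client-side sketch using `gradio_client` (the local URL and the issue number are illustrative; the argument order mirrors the `inputs` list of the analyzer wiring):

from gradio_client import Client

client = Client("http://localhost:7860")  # assumes the local launch shown below
report_html = client.predict(
    "https://github.com/gradio-app/gradio",  # issue_repo_url
    1234,                                     # issue_number (illustrative)
    "gemini",                                 # global_provider
    "gemini-2.0-flash",                       # global_model
    "",                                       # github_token (optional)
    "",                                       # llm_api_key (optional; server env keys used)
    api_name="/analyze_github_issue",
)
print(report_html)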
    # Bonus 1: Theme Gen
    theme_generate_button.click(
        fn=generate_theme,
        inputs=[theme_prompt_input, global_provider, global_model, global_api_key],
        outputs=[
            generated_theme_state,
            python_code_output,
            status_output,
            css_data_transport,
        ],
    ).then(
        fn=None,
        inputs=[css_data_transport],
        outputs=None,
        js="(css) => { const action = () => apply_gradio_theme(css); if (typeof apply_gradio_theme === 'function') { action(); } else { document.addEventListener('theme-helper-injected', action, { once: true }); } }",
    )
    upload_button.click(
        fn=handle_upload,
        inputs=[generated_theme_state, hub_theme_name, hub_hf_token],
        outputs=[upload_status],
    )

    # Bonus 2: Sketch (Uses Global Provider & Key, but LOCAL Vision Model)
    generate_event = sketch_generate_button.click(
        fn=generate_ui_from_sketch,
        inputs=[
            sketch_input,
            text_desc_input,
            global_provider,
            sketch_model_dropdown,
            global_api_key,
        ],
        outputs=[sketch_code_output],
    )
    generate_event.success(
        fn=_create_gradio_lite_html,
        inputs=[sketch_code_output],
        outputs=[sketch_preview_output],
        show_api=False,
    )

    def add_message_to_chat(history, message):
        """
        A simple function to add a user message to the chat history
        ...
        time.sleep(0.5)
        history.append({"role": "assistant", "content": "Thank you for your message!"})
        return history

    # Chat Example Logic
    add_msg_btn_comp.click(
        fn=add_message_to_chat,
        inputs=[chatbot_comp, msg_input_comp],
        outputs=[chatbot_comp],
        show_api=False,
    ).then(fn=lambda: "", outputs=[msg_input_comp], show_api=False)

    # Init
    app.load(fn=initialize_database, inputs=None, outputs=None, show_api=False).then(
        fn=_inject_theme_helper_js, inputs=None, outputs=[html_injector], show_api=False
    )

# endregion
if __name__ == "__main__":
    _check_env()
    app.allowed_paths = ["."]
    app.launch(mcp_server=True, server_port=7860)
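With `mcp_server=True`, Gradio additionally exposes every `api_name`-registered endpoint as an MCP tool over SSE. A sketch of how an MCP client would be pointed at this app, expressed as a Python dict to match the rest of the code here (the server name is illustrative, and the `/gradio_api/mcp/sse` path is Gradio's documented MCP endpoint; verify it against your Gradio version):

# Typical MCP client configuration (illustrative)
mcp_config = {
    "mcpServers": {
        "github-issue-agent": {  # hypothetical name for this server
            "url": "http://localhost:7860/gradio_api/mcp/sse"
        }
    }
}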
config/clients/gemini_cli.py
CHANGED

@@ -14,6 +14,7 @@ BASE_URL = os.getenv("GEMINI_BASE_URL", "https://generativelanguage.googleapis.c

DEFAULT_TEXT_MODEL = "gemini-2.0-flash"  # Default text model
VISION_MODEL = "gemini-2.5-flash-lite"  # Model with vision capability


def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
    """
    Returns the provided key if valid, otherwise falls back to the environment variable.

@@ -110,9 +111,18 @@ def call_api(

    # Add generation configuration to the payload
    payload["safetySettings"] = [
        {"category": f"HARM_CATEGORY_{cat}", "threshold": "BLOCK_NONE"}
        for cat in [
            "SEXUALLY_EXPLICIT",
            "HATE_SPEECH",
            "HARASSMENT",
            "DANGEROUS_CONTENT",
        ]
    ]
    payload["generationConfig"] = {
        "temperature": temperature,
        "maxOutputTokens": max_tokens,
    }

    stream_param = "streamGenerateContent" if stream else "generateContent"
    request_url = f"{BASE_URL}{model_id}:{stream_param}?key={final_api_key}"

@@ -123,10 +133,10 @@

    if response.status_code != 200:
        try:
            error_details = response.json()
            error_msg = error_details.get("error", {}).get("message", response.text)
        except json.JSONDecodeError:
            error_msg = response.text

        if response.status_code in [400, 401, 403]:
            raise ValueError(f"Gemini Auth Error: {error_msg}")
        else:
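For context, the hunk above assembles the standard Gemini `generateContent` REST payload. A minimal sketch of the surrounding request, assuming the `requests` library and a non-streaming call; `payload`, `model_id`, and the full base URL are reconstructions for illustration, not code from this commit:

import requests

model_id = "gemini-2.0-flash"
api_key = "YOUR_GEMINI_KEY"  # placeholder
payload = {
    "contents": [{"parts": [{"text": "Summarize this GitHub issue."}]}],
    "generationConfig": {"temperature": 0.2, "maxOutputTokens": 1024},
}
# Assumed default base URL (the module reads GEMINI_BASE_URL from the environment)
base_url = "https://generativelanguage.googleapis.com/v1beta/models/"
request_url = f"{base_url}{model_id}:generateContent?key={api_key}"

response = requests.post(request_url, json=payload, timeout=60)
response.raise_for_status()
# Gemini returns candidates, each holding content parts
text = response.json()["candidates"][0]["content"]["parts"][0]["text"]
print(text)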
config/clients/nebius_cli.py
CHANGED

@@ -3,15 +3,17 @@ import os

from io import BytesIO
from typing import Dict, List, Optional

from openai import APIConnectionError, AuthenticationError, OpenAI, RateLimitError
from PIL import Image


# Configuration
API_KEY_ENV_VAR = "NEBIUS_API_KEY"
BASE_URL = os.getenv("NEBIUS_BASE_URL", "https://api.studio.nebius.ai/v1/")
DEFAULT_TEXT_MODEL = "meta-llama/Meta-Llama-3.1-70B-Instruct"
DEFAULT_VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"


def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
    """
    Returns the provided key if valid, otherwise falls back to the environment variable.

@@ -20,13 +22,14 @@ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:

        return provided_key.strip()
    return os.getenv(API_KEY_ENV_VAR)


def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs):
    """
    Calls the Nebius AI Studio API (OpenAI-compatible).
    """
    # 1. Resolve API Key
    final_api_key = _get_api_key(api_key)

    if not final_api_key:
        return "Error: Authentication required. Please provide a Nebius API Key."

@@ -38,28 +41,25 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    # 3. Prepare Messages
    final_messages = []

    if image:
        print("Making a VISION call to Nebius.")
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")

        user_text = "Analyze this image."
        for msg in messages:
            if msg["role"] == "user":
                user_text = msg["content"]
                break

        content = [{"type": "text", "text": user_text}, {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}}]

        for msg in messages:
            if msg["role"] == "system":
                final_messages.append(msg)

        final_messages.append({"role": "user", "content": content})
        final_model = model_name or DEFAULT_VISION_MODEL
    else:

@@ -70,11 +70,7 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    # 4. Call API
    try:
        print(f"Calling Nebius API with model: {final_model}")
        completion = client.chat.completions.create(model=final_model, messages=final_messages, **kwargs)
        return completion.choices[0].message.content
    except AuthenticationError as e:
        raise ValueError(f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}")

@@ -83,4 +79,4 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    except APIConnectionError:
        raise ConnectionError("Failed to connect to API. Check your internet.")
    except Exception as e:
        raise RuntimeError(f"API Error: {str(e)}")
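A quick usage sketch of this client, based on the signature shown above (the local file path is hypothetical; extra kwargs pass through to `chat.completions.create`):

from PIL import Image
from config.clients import nebius_cli

# Text-only call: falls back to DEFAULT_TEXT_MODEL
reply = nebius_cli.call_api(
    messages=[{"role": "user", "content": "Say hello in one word."}],
    api_key="YOUR_NEBIUS_KEY",  # or set NEBIUS_API_KEY in the environment
)

# Vision call: the PIL image is base64-encoded into an OpenAI-style image_url part
sketch = Image.open("sketch.png")  # hypothetical local file
description = nebius_cli.call_api(
    messages=[{"role": "user", "content": "Describe this sketch."}],
    image=sketch,
    temperature=0.2,  # forwarded via **kwargs
)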
config/clients/openai_cli.py
CHANGED

@@ -3,14 +3,16 @@ import os

from io import BytesIO
from typing import Dict, List, Optional

from openai import APIConnectionError, AuthenticationError, OpenAI, RateLimitError
from PIL import Image


# Configuration
API_KEY_ENV_VAR = "OPENAI_API_KEY"
DEFAULT_TEXT_MODEL = "gpt-4o-mini"
DEFAULT_VISION_MODEL = "gpt-4o"


def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
    """
    Returns the provided key if valid, otherwise falls back to the environment variable.

@@ -19,13 +21,14 @@ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:

        return provided_key.strip()
    return os.getenv(API_KEY_ENV_VAR)


def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs):
    """
    Calls the OpenAI API (GPT models).
    """
    # 1. Resolve API Key
    final_api_key = _get_api_key(api_key)

    if not final_api_key:
        return "Error: Authentication required. Please provide an OpenAI API Key."

@@ -37,32 +40,29 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    # 3. Prepare Messages & Payload
    final_messages = []

    # Handle Vision (Multimodal)
    if image:
        print("Making a VISION call to OpenAI.")
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")

        # Extract user text from messages to combine with image
        user_text = "Analyze this image."
        for msg in messages:
            if msg["role"] == "user":
                user_text = msg["content"]
                break

        # Format strictly for OpenAI Vision
        content = [{"type": "text", "text": user_text}, {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}}]

        # Add system prompt if exists
        for msg in messages:
            if msg["role"] == "system":
                final_messages.append(msg)

        final_messages.append({"role": "user", "content": content})
        final_model = model_name or DEFAULT_VISION_MODEL

@@ -75,11 +75,7 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    # 4. Call API
    try:
        print(f"Calling OpenAI API with model: {final_model}")
        completion = client.chat.completions.create(model=final_model, messages=final_messages, **kwargs)
        return completion.choices[0].message.content
    except AuthenticationError as e:
        raise ValueError(f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}")

@@ -88,4 +84,4 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    except APIConnectionError:
        raise ConnectionError("Failed to connect to API. Check your internet.")
    except Exception as e:
        raise RuntimeError(f"API Error: {str(e)}")
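Because `**kwargs` is forwarded straight to `chat.completions.create`, callers can tune sampling without touching the client. A small sketch (the key is a placeholder):

from config.clients import openai_cli

answer = openai_cli.call_api(
    messages=[
        {"role": "system", "content": "You are a concise triage assistant."},
        {"role": "user", "content": "Summarize this bug report in one sentence."},
    ],
    model_name="gpt-4o-mini",  # overrides DEFAULT_TEXT_MODEL
    api_key="sk-...",          # placeholder; falls back to OPENAI_API_KEY
    temperature=0.1,           # forwarded via **kwargs
    max_tokens=60,             # forwarded via **kwargs
)
print(answer)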
config/clients/sambanova_cli.py
CHANGED

@@ -3,8 +3,8 @@ import os

from io import BytesIO
from typing import Dict, List, Optional

from openai import APIConnectionError, AuthenticationError, RateLimitError
from PIL import Image
from sambanova import SambaNova

@@ -14,6 +14,7 @@ BASE_URL = os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")

DEFAULT_TEXT_MODEL = "Meta-Llama-3.1-8B-Instruct"
DEFAULT_VISION_MODEL = "Llama-4-Maverick-17B-128E-Instruct"


def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
    """
    Returns the provided key if valid, otherwise falls back to the environment variable.

@@ -22,13 +23,20 @@ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:

        return provided_key.strip()
    return os.getenv(API_KEY_ENV_VAR)


def call_api(
    messages: List[Dict],
    model_name: Optional[str] = None,
    image: Optional[Image.Image] = None,
    api_key: Optional[str] = None,
    **kwargs,
):
    """
    Calls the SambaNova API, handling text or vision requests.
    """

    final_api_key = _get_api_key(api_key)

    if not final_api_key:
        return "Error: Authentication required. Please provide a SambaNova API Key."

@@ -47,10 +55,18 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

        # Extracts the prompt text
        # The SambaNova vision API seems to expect a simple text prompt
        user_prompt_text = "\n".join(
            [f"{msg['role']}: {msg['content']}" for msg in messages]
        )

        # Assembles the message content for vision
        message_content = [
            {"type": "text", "text": user_prompt_text},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{image_base64}"},
            },
        ]

        final_messages = [{"role": "user", "content": message_content}]
        final_model = model_name or DEFAULT_VISION_MODEL  # type: ignore

@@ -64,13 +80,13 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti

    try:
        print(f"Calling SambaNova API with model: {final_model}")
        completion = client.chat.completions.create(
            model=final_model, messages=final_messages, **kwargs
        )
        return completion.choices[0].message.content
    except AuthenticationError as e:
        raise ValueError(
            f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}"
        )
    except RateLimitError:
        raise RuntimeError("Rate Limit Exceeded (429). Please try again later.")
    except APIConnectionError:
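The flattening step above folds the whole chat history into one text block before attaching the image. For a two-message history it produces output like this (a minimal sketch):

messages = [
    {"role": "system", "content": "You are a UI expert."},
    {"role": "user", "content": "Turn this sketch into Gradio code."},
]
user_prompt_text = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
print(user_prompt_text)
# system: You are a UI expert.
# user: Turn this sketch into Gradio code.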
config/constants.py
CHANGED

@@ -9,23 +9,30 @@ PROVIDER_LOGOS = {

}

AVAILABLE_SKETCH_MODELS_BY_PROVIDER = {
    "gemini": ["gemini-2.5-flash", "gemini-2.5-flash-lite"],
    "sambanova": ["Llama-4-Maverick-17B-128E-Instruct"],
    "openai": ["gpt-4o-mini", "gpt-4o"],
    "nebius": [
        "Qwen/Qwen2.5-VL-72B-Instruct",
    ],
}
AVAILABLE_ISSUE_MODELS_BY_PROVIDER = {
    "gemini": ["gemini-2.0-flash"],
    "sambanova": [
        "DeepSeek-R1",
        "DeepSeek-V3-0324",
        "DeepSeek-V3.1",
        "Meta-Llama-3.1-8B-Instruct",
        "Meta-Llama-3.3-70B-Instruct",
        "gpt-oss-120b",
    ],
    "openai": ["gpt-4o-mini", "gpt-4o"],
    "nebius": [
        "deepseek-ai/DeepSeek-R1-0528",
        "meta-llama/Llama-3.3-70B-Instruct",
        "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-fast",
    ],
}

# Prompt template

@@ -105,9 +112,9 @@ You are an expert Gradio UI developer. Your task is to analyze a sketch and gene

2. **NO EXTERNAL LIBRARIES:** Only use the `gradio` library.
3. **NO FUNCTIONS OR CLASSES:** Start directly with `import gradio as gr` then `with gr.Blocks() as demo:`.
4. **NO `demo.launch()`:** Do NOT include the `demo.launch()` call.
5. **FORMATTING:**
   - Use standard Python indentation (4 spaces).
   - **DO NOT** put multiple statements on one line using semicolons (`;`).
   - Each component must be on its own line.
6. **STRINGS:** Do NOT use triple quotes (`"""
"..."
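These per-provider dicts are exactly what the app's provider-change handler reads to repopulate the model dropdowns. A minimal sketch of that lookup:

from config.constants import AVAILABLE_ISSUE_MODELS_BY_PROVIDER

provider = "sambanova"
text_models = AVAILABLE_ISSUE_MODELS_BY_PROVIDER.get(provider, [])
default_model = text_models[0] if text_models else None
print(default_model)  # DeepSeek-R1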
config/database.py
CHANGED

@@ -1,16 +1,19 @@

import os

import psycopg2
from psycopg2.extras import DictCursor


def connect():
    """Establishes a connection to the PostgreSQL database using the DATABASE_URL."""
    database_url = os.getenv("DATABASE_URL")
    if not database_url:
        raise ValueError("DATABASE_URL environment variable is not set.")

    conn = psycopg2.connect(database_url, cursor_factory=DictCursor)
    return conn


def initialize_database():
    """Creates the cache tables in PostgreSQL if they don't exist."""
    conn = connect()
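`psycopg2.connect` accepts a standard libpq connection URL, so `DATABASE_URL` follows the usual PostgreSQL form. A quick sketch (credentials, host, and database name are placeholders):

import os

os.environ["DATABASE_URL"] = "postgresql://user:password@localhost:5432/issue_cache"  # placeholder

from config.database import connect

conn = connect()
with conn.cursor() as cur:  # DictCursor rows support dict-style access
    cur.execute("SELECT 1 AS ok")
    print(cur.fetchone()["ok"])  # 1
conn.close()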
config/database_sqlite.py
CHANGED

@@ -13,7 +13,7 @@ def connect():

def initialize_database():
    """Creates the cache tables if they don't exist."""
    conn = connect()
    cursor = conn.cursor()
    # Table for both Issues and Pull Requests
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS items (
config/model_factory.py
CHANGED

@@ -5,13 +5,15 @@ It abstracts the specific client implementations and provides a unified interfac

"""

from typing import Any, Callable, Dict, List, Optional

from PIL import Image


# Step 1: Import the client-specific API call functions
# It is assumed that there is a 'clients' directory with modules such as gemini.py, sambanova.py, etc.
# Each module should expose a `call_api` function.
try:
    from .clients import gemini_cli, nebius_cli, openai_cli, sambanova_cli
except ImportError:
    print("Warning: Could not import all LLM clients. Please ensure clients/gemini.py, clients/sambanova.py, etc. exist.")

@@ -62,7 +64,14 @@ class LLMFactory:

        return PROVIDER_REGISTRY[provider_key]

    @staticmethod
    def call(
        llm_provider: str,
        llm_model: Optional[str] = None,
        messages: List[Dict] = [],
        image: Optional[Image.Image] = None,
        api_key: Optional[str] = None,
        **kwargs: Any,
    ) -> str:
        """
        The main factory method. It routes the request to the correct provider client.
        This method is designed to be the single point of contact for all LLM calls.

@@ -80,7 +89,7 @@ class LLMFactory:

            str: The text response from the LLM.
        """
        print(f"LLM Factory: Routing call to provider '{llm_provider}' with model '{llm_model or 'default'}'.")

        # Get the correct API function from the registry
        api_call_function = LLMFactory.get_provider(llm_provider)

@@ -96,5 +105,3 @@ class LLMFactory:

            return str(result)

        return result
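A usage sketch of the factory, assuming the registry keys match the lowercase provider names used in constants.py (message content and the local image path are illustrative):

from PIL import Image

from config.model_factory import LLMFactory

# Text route: dispatched to gemini_cli.call_api via the provider registry
summary = LLMFactory.call(
    llm_provider="gemini",
    llm_model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Summarize: the sync tool caches issues."}],
)

# Vision route: the PIL image is forwarded to the provider's vision model
ui_code = LLMFactory.call(
    llm_provider="nebius",
    messages=[{"role": "user", "content": "Generate Gradio code for this sketch."}],
    image=Image.open("sketch.png"),  # hypothetical local file
)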
requirements.txt
CHANGED

@@ -5,5 +5,4 @@ requests

 sambanova
 markdown
 psycopg2-binary
-langchain-google-genai
 openai