elismasilva committed on
Commit
be53f95
·
1 Parent(s): 89705d4

update changes

app.py CHANGED
@@ -17,8 +17,8 @@ from config.constants import (
17
  SYSTEM_PROMPT_DUPLICATE_FINDER_EN,
18
  SYSTEM_PROMPT_ISSUE_ANALYZER_EN,
19
  SYSTEM_PROMPT_PRIORITIZER_EN,
20
- SYSTEM_PROMPT_SKETCH,
21
- SYSTEM_PROMPT_THEME_GENERATOR,
22
  )
23
  from config.database import connect, initialize_database
24
  from config.model_factory import LLMFactory
@@ -30,6 +30,17 @@ load_dotenv()
30
 
31
 
32
  # region TOOLS FUNCTIONS
33
  def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None, desc: str = ""):
34
  """Helper function to fetch all pages from a paginated GitHub API endpoint."""
35
  results = []
@@ -59,15 +70,13 @@ def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None
59
  def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.Progress(track_tqdm=True)):
60
  """
61
  Performs a lightweight sync of a GitHub repository with the local database.
62
- It fetches only open issues and the most recent releases to ensure quick updates.
63
-
64
  Args:
65
- repo_url (str): The URL of the GitHub repository.
66
- github_token (str | None): An optional GitHub token for higher API rate limits.
67
- progress (gr.Progress): A Gradio Progress object to report status.
68
-
69
  Returns:
70
- str: A status message indicating completion or failure.
71
  """
72
  if not repo_url:
73
  raise gr.Error("Please provide a repository URL.")
@@ -84,7 +93,7 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
84
  open_items = fetch_all_pages(issues_url, headers)
85
  for item in progress.tqdm(open_items, desc="Syncing Open Issues"):
86
  if "pull_request" in item:
87
- continue # We only want to sync actual issues here
88
  item_labels = json.dumps([label["name"] for label in item["labels"]])
89
 
90
  cursor.execute(
@@ -109,7 +118,7 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
109
  item.get("body"),
110
  item["state"],
111
  item["user"]["login"],
112
- item_labels, # Use the dumped JSON string here
113
  item["comments"],
114
  item.get("reactions", {}).get("total_count", 0),
115
  item["created_at"],
@@ -118,18 +127,7 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
118
  ),
119
  )
120
 
121
- # 2. Sync RECENT releases only
122
- # releases_url = f"https://api.github.com/repos/{owner}/{repo}/releases?per_page=50"
123
- # latest_releases = requests.get(releases_url, headers=headers).json()
124
- # for release in progress.tqdm(latest_releases, desc="Syncing Recent Releases"):
125
- # cursor.execute(
126
- # """
127
- # INSERT INTO releases (repo, tag_name, name, body, published_at)
128
- # VALUES (%s, %s, %s, %s, %s)
129
- # ON CONFLICT (tag_name) DO NOTHING;
130
- # """,
131
- # (repo_slug, release["tag_name"], release["name"], release.get("body"), release["published_at"]),
132
- # )
133
  releases_url = f"https://api.github.com/repos/{owner}/{repo}/releases?per_page=100"
134
  all_releases = fetch_all_pages(releases_url, headers, progress, desc="Syncing All Releases")
135
  for release in progress.tqdm(all_releases, desc="Saving Releases"):
@@ -147,30 +145,44 @@ def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.
147
 
148
  except Exception as e:
149
  import traceback
150
-
151
  traceback.print_exc()
152
- raise gr.Error(f"Sync failed: {e}")
153
 
154
 
155
  def analyze_github_issue(
156
- repo_url: str, issue_number: str, llm_provider: str, llm_model: str, github_token: str | None = None, progress=gr.Progress(track_tqdm=True)
157
  ):
158
  """
159
- Analyzes a single GitHub issue by fetching its context (details, PRs, comments)
160
- just-in-time and cross-referencing it with release notes from the local cache.
161
 
162
  Args:
163
- repo_url (str): The URL of the GitHub repository.
164
- issue_number (str): The number of the issue to analyze.
165
- llm_provider (str): The provider of the large language model.
166
- llm_model (str): The specific model to use for analysis.
167
- github_token (str | None): An optional GitHub token for API calls.
168
- progress (gr.Progress): A Gradio Progress object to report status.
169
-
170
  Returns:
171
- str: An HTML-formatted analysis report or an error message.
172
  """
173
  try:
 
 
174
  if not repo_url or not issue_number:
175
  return "## Error\nPlease provide both a repository URL and an issue number."
176
 
@@ -201,7 +213,6 @@ def analyze_github_issue(
201
  pr_num = event["source"]["issue"]["number"]
202
  pr_urls_to_fetch.append(f"https://api.github.com/repos/{repo_slug}/pulls/{pr_num}")
203
 
204
- # Fetch PR details
205
  for i, url in enumerate(pr_urls_to_fetch):
206
  progress(0.4 + (0.2 * (i / len(pr_urls_to_fetch))) if pr_urls_to_fetch else 0.4, desc=f"Fetching details for PR {i + 1}/{len(pr_urls_to_fetch)}...")
207
  pr_res = requests.get(url, headers=headers)
@@ -210,8 +221,8 @@ def analyze_github_issue(
210
 
211
  # 2. GET RELEASE DATA FROM CACHE
212
  progress(0.6, desc="Querying local release cache...")
213
- releases_cursor = cursor.execute("SELECT * FROM releases WHERE repo = %s ORDER BY published_at DESC LIMIT 50", (repo_slug,))
214
- releases = releases_cursor.fetchall()
215
  conn.close()
216
 
217
  # 3. SUMMARIZE COMMENTS
@@ -222,12 +233,16 @@ def analyze_github_issue(
222
  try:
223
  summarizer_prompt = SYSTEM_PROMPT_COMMENT_SUMMARIZER_EN.format(comments_text=comments_text)
224
  additional_problems_summary = LLMFactory.call(
225
- llm_provider, llm_model, messages=[{"role": "user", "content": summarizer_prompt}], temperature=0.0
226
  )
227
  except Exception:
228
  additional_problems_summary = "Could not summarize comments due to an error."
229
 
230
- # 4. FORMAT CONTEXT FOR MAIN ANALYSIS LLM
231
  progress(0.8, desc="Preparing final analysis...")
232
 
233
  pull_requests_summary = (
@@ -243,7 +258,7 @@ def analyze_github_issue(
243
 
244
  release_notes_summary = (
245
  "\n\n".join(
246
- [f"- **Release {r['tag_name']} ({r['published_at'][:10]}):**\n{str(r['body'])[:500] if r['body'] else 'No description.'}..." for r in releases]
247
  )
248
  if releases
249
  else "No releases found in local cache."
@@ -265,10 +280,15 @@ def analyze_github_issue(
265
  progress(0.9, desc="Generating final report with AI...")
266
 
267
  messages = [{"role": "user", "content": final_prompt}]
268
- raw_output = LLMFactory.call(llm_provider, llm_model, messages=messages, temperature=0.1, max_tokens=2048)
269
- if isinstance(raw_output, str) and raw_output.startswith("Error:"):
270
- raise Exception(raw_output)
271
-
272
  report_start_marker = f"## Analysis of Issue #{issue_number}"
273
  report_start_index = raw_output.find(report_start_marker)
274
 
@@ -291,37 +311,50 @@ def analyze_github_issue(
291
  </span>
292
  </div>
293
  """
294
-
295
- # Convert the LLM's Markdown analysis to HTML
296
  analysis_html = markdown.markdown(analysis.strip())
 
297
 
298
- # Combine the two HTML parts
299
- final_html_output = provider_seal_html + analysis_html
300
- return final_html_output
301
  except Exception as e:
302
  import traceback
303
-
304
  traceback.print_exc()
305
- return f"## Analysis Error\n**Details:** `{str(e)}`"
306
-
307
-
308
- def find_duplicate_issues(repo_url: str, issue_number: str, llm_provider: str, llm_model: str, github_token: str | None = None, progress=gr.Progress()):
309
  """
310
- Finds potential duplicate issues for a given issue.
311
- It combines a real-time search for mentions in the issue's timeline with a
312
- keyword-based search against the local cache of open issues.
313
-
314
  Args:
315
- repo_url (str): The URL of the GitHub repository.
316
- issue_number (str): The number of the main issue to check for duplicates.
317
- llm_provider (str): The provider of the large language model.
318
- llm_model (str): The specific model to use for analysis.
319
- github_token (str | None): An optional GitHub token for API calls.
320
- progress (gr.Progress): A Gradio Progress object to report status.
321
-
322
  Returns:
323
- str: A Markdown-formatted report of potential duplicates.
324
  """
 
 
325
  if not repo_url or not issue_number:
326
  return "Please provide the repository and the main issue number."
327
 
@@ -333,12 +366,14 @@ def find_duplicate_issues(repo_url: str, issue_number: str, llm_provider: str, l
333
  cursor = conn.cursor()
334
 
335
  progress(0, desc="Fetching main issue from cache...")
336
- main_issue = cursor.execute("SELECT * FROM items WHERE repo = %s AND number = %s", (repo_slug, issue_number)).fetchone()
 
 
337
  if not main_issue:
338
  conn.close()
339
  return "Main issue not found in cache. Please synchronize the repository first."
340
 
341
- # STEP 1: GATHER MENTIONS VIA TIMELINE API
342
  progress(0.2, desc="Fetching timeline for mentions...")
343
  timeline_api_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/timeline?per_page=100"
344
  timeline_events = fetch_all_pages(timeline_api_url, headers)
@@ -354,13 +389,14 @@ def find_duplicate_issues(repo_url: str, issue_number: str, llm_provider: str, l
354
  if not mentions_summary:
355
  mentions_summary = "No other issues or PRs were found mentioning this issue in its timeline."
356
 
357
- # STEP 2: SEARCH FOR CANDIDATES BY KEYWORD (FROM CACHE)
358
  progress(0.5, desc="Searching for candidates in cache...")
359
  main_issue_title = main_issue["title"]
360
  keywords = [word for word in re.findall(r"\b\w+\b", main_issue_title) if len(word) > 3]
361
  query_conditions = " OR ".join([f"title LIKE '%{keyword}%'" for keyword in keywords])
362
- candidate_query = f"SELECT * FROM items WHERE repo = '{repo_slug}' AND is_pr = 0 AND state = 'open' AND ({query_conditions})"
363
- candidate_issues = cursor.execute(candidate_query).fetchall()
 
364
  conn.close()
365
 
366
  candidate_issues_summary = ""
@@ -373,7 +409,7 @@ def find_duplicate_issues(repo_url: str, issue_number: str, llm_provider: str, l
373
  if not candidate_issues_summary:
374
  candidate_issues_summary = "No similar open issues found via keyword search."
375
 
376
- # STEP 3: FORMAT CONTEXT AND CALL LLM
377
  progress(0.8, desc="Analyzing for duplicates with AI...")
378
  prompt_context = {
379
  "main_issue_number": main_issue["number"],
@@ -385,31 +421,51 @@ def find_duplicate_issues(repo_url: str, issue_number: str, llm_provider: str, l
385
  final_prompt = SYSTEM_PROMPT_DUPLICATE_FINDER_EN.format(**prompt_context)
386
 
387
  messages = [{"role": "user", "content": final_prompt}]
388
- analysis = LLMFactory.call(llm_provider, llm_model, messages=messages, temperature=0.0)
389
-
390
  return analysis
391
 
392
  except Exception as e:
393
  import traceback
394
-
395
  traceback.print_exc()
396
- return f"## Error Finding Duplicates\n**Details:** `{str(e)}`"
397
-
398
-
399
- def prioritize_open_issues(repo_url: str, llm_provider: str, llm_model: str, progress=gr.Progress(track_tqdm=True)):
400
  """
401
- Analyzes open issues from the local cache to create a prioritized backlog for developers.
402
- It fetches issues based on engagement (comments, reactions) and uses an LLM to rank them.
403
-
404
  Args:
405
  repo_url (str): The URL of the GitHub repository.
406
- llm_provider (str): The provider of the large language model.
407
- llm_model (str): The specific model to use for prioritization.
408
- progress (gr.Progress): A Gradio Progress object to report status.
409
-
410
  Returns:
411
- str: A Markdown-formatted prioritized list of issues.
412
  """
 
 
413
  if not repo_url:
414
  return "Please provide the repository URL."
415
 
@@ -418,22 +474,20 @@ def prioritize_open_issues(repo_url: str, llm_provider: str, llm_model: str, pro
418
  conn = connect()
419
  cursor = conn.cursor()
420
 
421
- # 1. Fetch all open issues from the cache, ordered by engagement
422
  progress(0, desc="Fetching open issues from cache...")
423
- # Order by comments and reactions to get the most "active" issues
424
- open_issues = cursor.execute(
425
- "SELECT * FROM items WHERE repo = %s AND is_pr = 0 AND state = 'open' ORDER BY comments DESC, reactions DESC LIMIT 50", (repo_slug,)
426
- ).fetchall()
427
  conn.close()
428
 
429
  if not open_issues:
430
  return "No open issues found in the cache to prioritize."
431
 
432
- # 2. Format the context for the LLM
433
  progress(0.5, desc="Preparing context for prioritization...")
434
  issues_list_summary = ""
435
  for issue in open_issues:
436
- labels = json.loads(issue["labels"]) if issue["labels"] else "None"
437
  issues_list_summary += (
438
  f'- **Issue #{issue["number"]}**: "{issue["title"]}"\n'
439
  f" - **Labels**: {labels}\n"
@@ -443,27 +497,114 @@ def prioritize_open_issues(repo_url: str, llm_provider: str, llm_model: str, pro
443
  prompt_context = {"issues_list_summary": issues_list_summary}
444
  final_prompt = SYSTEM_PROMPT_PRIORITIZER_EN.format(**prompt_context)
445
 
446
- # 3. Call LLM for analysis
447
  progress(0.8, desc="Generating priority list with AI...")
448
  messages = [{"role": "user", "content": final_prompt}]
449
- analysis = LLMFactory.call(llm_provider, llm_model, messages=messages, temperature=0.1, max_tokens=4096)
450
-
451
  return analysis
452
 
453
  except Exception as e:
454
  import traceback
455
-
456
  traceback.print_exc()
457
- return f"## Error During Prioritization\n**Details:** `{str(e)}`"
458
 
459
 
460
- def generate_theme(prompt: str, llm_provider: str, llm_model: str):
461
  """
462
- Tool 1 function: Generates a complete theme from a text prompt.
463
  """
 
 
464
  try:
465
- messages = [{"role": "system", "content": SYSTEM_PROMPT_THEME_GENERATOR}, {"role": "user", "content": prompt}]
466
- llm_output = LLMFactory.call(llm_provider=llm_provider, llm_model=llm_model, messages=messages, temperature=0.1, max_tokens=2048)
467
 
468
  if isinstance(llm_output, str) and llm_output.startswith("Error:"):
469
  raise Exception(llm_output)
@@ -474,6 +615,7 @@ def generate_theme(prompt: str, llm_provider: str, llm_model: str):
474
  constructor_kwargs = theme_config.constructor_args.model_dump(exclude_unset=True)
475
  set_kwargs = theme_config.set_args.model_dump(exclude_unset=True)
476
 
 
477
  for key in list(set_kwargs.keys()):
478
  value = set_kwargs[key]
479
  if isinstance(value, str) and value.startswith("*"):
@@ -481,10 +623,8 @@ def generate_theme(prompt: str, llm_provider: str, llm_model: str):
481
  name, shade = value[1:].rsplit("_", 1)
482
  shade_num = int(shade)
483
  if not (50 <= shade_num <= 950):
484
- print(f"WARNING: Invalid color ref: {value}. Ignoring.")
485
  del set_kwargs[key]
486
  except (ValueError, IndexError):
487
- print(f"WARNING: Malformed color ref: {value}. Ignoring.")
488
  del set_kwargs[key]
489
 
490
  if "font" in constructor_kwargs:
@@ -504,8 +644,7 @@ def generate_theme(prompt: str, llm_provider: str, llm_model: str):
504
  css_str = theme_object._get_theme_css()
505
 
506
  def format_arg(val):
507
- if isinstance(val, str):
508
- return f"'{val}'"
509
  if isinstance(val, list):
510
  font_list = [f"gr.themes.GoogleFont('{f.name}')" if isinstance(f, gr.themes.GoogleFont) else f"'{f}'" for f in val]
511
  return f"[{', '.join(font_list)}]"
@@ -519,21 +658,23 @@ def generate_theme(prompt: str, llm_provider: str, llm_model: str):
519
  return theme_object, py_code, status_message, css_str
520
 
521
  except Exception as e:
522
- error_message = f"An error occurred: {e}."
523
- return None, "", error_message, ""
524
 
525
 
526
  def handle_upload(theme_obj, theme_name, hf_token):
527
  """
528
- Handles uploading the generated theme to Hugging Face Hub.
529
-
530
- Args:
531
- theme_obj (gr.themes.Base): The Gradio theme object to be uploaded.
532
- theme_name (str): The desired name for the repository on the Hub.
533
- hf_token (str): The Hugging Face API token for authentication.
534
-
535
- Returns:
536
- gr.Markdown: A Gradio Markdown component with the success or error message.
537
  """
538
  if not isinstance(theme_obj, gr.themes.Base):
539
  return gr.Markdown("⚠️ Please generate a theme first.", visible=True)
@@ -548,59 +689,74 @@ def handle_upload(theme_obj, theme_name, hf_token):
548
  return gr.Markdown(f"❌ **Error:** {e}", visible=True)
549
 
550
 
551
- def generate_ui_from_sketch(sketch_image, text_description: str, llm_provider: str, llm_model: str):
552
  """
553
- Generates Python code for a Gradio UI from a sketch image and an optional text description.
554
-
555
  Args:
556
- sketch_image (Image): The sketch image provided.
557
- text_description (str): Additional text description to guide the generation.
558
- llm_provider (str): The provider of the large language model.
559
- llm_model (str): The specific model to use for generation.
560
-
561
- Returns:
562
- str: The generated Python code as a string, or an error message.
563
  """
 
 
564
  if sketch_image is None:
565
  return "# Please draw or upload an image."
566
 
567
  try:
568
  messages = [
569
- {"role": "system", "content": SYSTEM_PROMPT_SKETCH},
570
  {"role": "user", "content": f"Additional text description: {text_description if text_description else 'None'}"},
571
  ]
572
 
573
- llm_output = LLMFactory.call(llm_provider=llm_provider, llm_model=llm_model, messages=messages, image=sketch_image, temperature=0.1, max_tokens=8192)
574
 
575
  if isinstance(llm_output, str) and llm_output.startswith("Error:"):
576
  raise Exception(llm_output)
577
 
578
  code = llm_output.split("```python\n")[1].split("\n```")[0] if "```python" in llm_output else llm_output
579
-
580
  return code.strip()
581
  except Exception as e:
582
- error_message = f"# An error occurred while generating the UI: {e}"
583
- return error_message
584
 
585
 
586
  # endregion
587
 
588
 
589
  # region HELPER FUNCTIONS
590
- def check_env():
591
- """
592
- Checks for the presence of required environment variables and prints a warning if they are not set.
593
- """
 
 
 
594
  if not os.getenv("GOOGLE_API_KEY"):
595
  print("WARNING: The GOOGLE_API_KEY environment variable was not set.")
596
  if not os.getenv("SAMBANOVA_API_KEY"):
597
  print("WARNING: The SAMBANOVA_API_KEY environment variable was not set.")
598
 
599
-
600
- def inject_theme_helper_js():
601
- """
602
- Called by app.load() of the main page. Returns a dictionary for HTMLInjector.
603
- """
604
  js_code = """
605
  function apply_gradio_theme(css) {
606
  let style_tag = document.getElementById('dynamic_theme_css');
@@ -610,20 +766,12 @@ def inject_theme_helper_js():
610
  document.head.appendChild(style_tag);
611
  }
612
  style_tag.innerHTML = css;
613
- console.log('✅ Helper: Theme applied via global function.');
614
  }
615
  document.dispatchEvent(new CustomEvent('theme-helper-injected'));
616
- console.log('Injector: apply_gradio_theme() function injected and event fired.');
617
  """
618
  return {"js": js_code}
619
 
620
-
621
- def add_interactive_inputs():
622
- """
623
- Creates a set of interactive input components for the theme preview.
624
- Returns:
625
- tuple: A tuple of the created Gradio components.
626
- """
627
  with gr.Row():
628
  with gr.Column(variant="panel", scale=1):
629
  gr.Markdown("### Interactive Controls")
@@ -640,22 +788,13 @@ def add_interactive_inputs():
640
  gr.Button("Small", size="sm")
641
  gr.UploadButton(size="sm")
642
  gr.Button("Stop", variant="stop", size="sm")
643
-
644
  return radio, drop, drop_2, check
645
 
646
-
647
- def add_data_display_components():
648
- """
649
- Creates a set of data display components for the theme preview.
650
- """
651
  with gr.Row():
652
  gr.Dataframe(value=[[1, 2], [3, 4]], label="Dataframe"), gr.JSON(value={"a": 1}), gr.Label(value={"cat": 0.7}), gr.File()
653
 
654
-
655
- def add_media_components():
656
- """
657
- Creates a set of media-related components for the theme preview.
658
- """
659
  with gr.Row():
660
  (
661
  gr.ColorPicker(label="Color Picker"),
@@ -663,13 +802,7 @@ def add_media_components():
663
  gr.Gallery([("https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg", "lion")], height="200px"),
664
  )
665
 
666
-
667
- def add_chatbot_component():
668
- """
669
- Creates a chatbot interface for the theme preview.
670
- Returns:
671
- tuple: A tuple of the created Gradio components (chatbot, input, button).
672
- """
673
  with gr.Row():
674
  with gr.Column(scale=2):
675
  chatbot = gr.Chatbot([{"role": "user", "content": "Hello"}], label="Chatbot", type="messages")
@@ -680,14 +813,7 @@ def add_chatbot_component():
680
  gr.Slider(label="Temperature"), gr.Checkbox(label="Streaming")
681
  return chatbot, msg_input, add_msg_btn
682
 
683
-
684
- def create_example_app():
685
- """
686
- Builds the complete example application used for theme previewing.
687
- It assembles various components into a tabbed interface.
688
- Returns:
689
- tuple: A tuple of components from the chatbot interface for event handling.
690
- """
691
  with gr.Column(scale=3, elem_id="app"):
692
  with gr.Tabs():
693
  with gr.TabItem("Common Inputs"):
@@ -697,29 +823,63 @@ def create_example_app():
697
  gr.Slider(label="Slider 1")
698
  gr.Slider(label="Slider 2")
699
  gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
700
- radio, drop, drop_2, check = add_interactive_inputs()
701
  gr.Examples(
702
  examples=[["A", "Option 1", ["B"], True], ["B", "Option 2", ["A", "C"], False]], inputs=[radio, drop, drop_2, check], label="Input Examples"
703
  )
704
  with gr.TabItem("Data and Media"):
705
- add_data_display_components()
706
- add_media_components()
707
-
708
  with gr.TabItem("Layout and Chat"):
709
- chatbot, msg_input, add_msg_btn = add_chatbot_component()
710
-
711
  return chatbot, msg_input, add_msg_btn
712
 
713
-
714
  # endregion
715
 
716
  # region MAIN LAYOUT DEFINITION
717
  css = """
718
  .gradio-container { max-width: none !important; }
719
- .fillable {
720
- width: 100% !important;
721
- max-width: unset !important;
722
- }
723
  #app { height: 100vh; overflow-y: scroll; }
724
  """
725
 
@@ -730,73 +890,89 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
730
 
731
  gr.Markdown("# 🤖 GitRepo Inspector (MCP Server) 🤖")
732
 
733
  with gr.Tabs():
734
  with gr.TabItem("1. Sync Repo"):
735
  gr.Markdown("### 🔄 Synchronize Repository with Local Cache")
736
- gr.Markdown(
737
- "Before using the analysis tools, fetch the latest data from a GitHub repository. This sync fetches open issues and recent releases, making other tools faster."
738
- )
739
  sync_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
740
- sync_github_token = gr.Textbox(label="GitHub Token (Recommended for higher rate limits)", type="password")
741
  sync_button = gr.Button("Start Synchronization", variant="primary")
742
  sync_status_output = gr.Textbox(label="Sync Status", interactive=False)
743
 
744
  with gr.TabItem("2. Issue Analyzer"):
745
  gr.Markdown("### 🔍 Analyze a GitHub Issue")
746
- gr.Markdown(
747
- "Provide a repository URL and an issue number to check if it has been addressed in a recent release or PR, using a combination of real-time data and the local cache."
748
- )
749
  with gr.Row():
750
- llm_issue_provider_dropdown = gr.Dropdown(choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()), value="gemini", label="LLM Provider")
751
- llm_issue_model_dropdown = gr.Dropdown(
752
- choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"], value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0], label="Model", interactive=True
753
- )
754
- with gr.Row():
755
- issue_repo_url = gr.Textbox(
756
- label="GitHub Repository URL", value="https://github.com/gradio-app/gradio", placeholder="https://github.com/gradio-app/gradio"
757
- )
758
  issue_number_input = gr.Number(label="Issue Number", precision=0)
759
-
760
- issue_github_token = gr.Textbox(label="GitHub Token (Optional, for higher rate limits)", type="password")
761
  issue_analyze_button = gr.Button("Analyze Issue 🕵️", variant="primary")
762
  issue_report_output = gr.HTML(label="Analysis Report")
763
 
764
  with gr.TabItem("3. Issue Duplicate Finder"):
765
  gr.Markdown("### 👯 Find Duplicate Issues")
766
- gr.Markdown("Provide a repository and a main issue number to find potential duplicates using the local cache and timeline mentions.")
767
  with gr.Row():
768
  dup_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
769
  dup_issue_number = gr.Number(label="Main Issue #", precision=0)
770
- with gr.Row():
771
- dup_llm_provider = gr.Dropdown(list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()), value="gemini", label="LLM Provider")
772
- dup_llm_model = gr.Dropdown(
773
- AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"], value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0], label="Model", interactive=True
774
- )
775
- dup_github_token = gr.Textbox(label="GitHub Token (Optional)", type="password")
776
  dup_find_button = gr.Button("Find Duplicates", variant="primary")
777
  dup_report_output = gr.Markdown(label="Duplicate Report")
778
 
779
  with gr.TabItem("4. Issue Prioritizer"):
780
  gr.Markdown("### 🥇 Prioritize Backlog")
781
- gr.Markdown("Analyzes the most engaged open issues from the local cache and generates a priority list.")
782
  prio_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
783
- with gr.Row():
784
- prio_llm_provider = gr.Dropdown(list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()), value="gemini", label="LLM Provider")
785
- prio_llm_model = gr.Dropdown(
786
- AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"], value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0], label="Model", interactive=True
787
- )
788
  prio_run_button = gr.Button("Generate Priority List", variant="primary")
789
  prio_report_output = gr.Markdown(label="Prioritized Backlog")
790
 
791
- with gr.TabItem("BONUS 1 - Theme Generator"):
 
 
 
792
  with gr.Row():
793
- llm_theme_provider_dropdown = gr.Dropdown(choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()), value="gemini", label="LLM Provider")
794
- llm_theme_model_dropdown = gr.Dropdown(
795
- choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"], value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0], label="Model", interactive=True
796
- )
797
  with gr.Row():
798
  with gr.Column(scale=1, min_width=450):
799
- gr.Markdown("### 🖌️ Create a Theme with Natural Language")
800
  with gr.Group():
801
  theme_prompt_input = gr.Textbox(label="Describe your theme", placeholder="Ex: a dark theme with purple tones...", lines=4)
802
  theme_generate_button = gr.Button("Generate Theme ✨", variant="primary")
@@ -805,22 +981,21 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
805
  with gr.TabItem("Code to Use"):
806
  python_code_output = gr.Code(label="Copy and paste", language="python")
807
  with gr.TabItem("Publish to Hub"):
808
- gr.Markdown("Share your creation!")
809
  hub_theme_name = gr.Textbox(label="Theme Name", placeholder="my-amazing-theme")
810
  hub_hf_token = gr.Textbox(label="HF Token", type="password", placeholder="hf_...")
811
  upload_button = gr.Button("Upload 🚀")
812
  upload_status = gr.Markdown(visible=False)
813
-
814
- chatbot_comp, msg_input_comp, add_msg_btn_comp = create_example_app()
815
 
816
  with gr.TabItem("BONUS 2 - Sketch Image to Gradio UI"):
817
  gr.Markdown("### 🖼️ Create a UI from a Sketch")
 
818
  with gr.Row():
819
- llm_sketch_provider_dropdown = gr.Dropdown(choices=list(AVAILABLE_SKETCH_MODELS_BY_PROVIDER.keys()), value="sambanova", label="LLM Provider")
820
- llm_sketch_model_dropdown = gr.Dropdown(
821
- choices=AVAILABLE_SKETCH_MODELS_BY_PROVIDER["sambanova"],
822
- value=AVAILABLE_SKETCH_MODELS_BY_PROVIDER["sambanova"][0],
823
- label="Model",
824
  interactive=True,
825
  )
826
  with gr.Row():
@@ -829,68 +1004,102 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
829
  text_desc_input = gr.Textbox(label="Additional Description (Optional)")
830
  sketch_generate_button = gr.Button("1. Generate Code", variant="secondary")
831
  with gr.Column(scale=2):
832
- sketch_code_output = gr.Code(label="Generated UI Code", language="python", lines=20)
833
 
834
- # Events binding
835
- # Tool 1: Sync Repo
836
- sync_button.click(fn=sync_repository, inputs=[sync_repo_url, sync_github_token], outputs=[sync_status_output], api_name="sync_repository")
837
 
838
- # Tool 2: Issue Analyzer
839
- def update_model_choices(provider):
840
- """
841
- Updates the model dropdown choices when the provider changes.
842
- """
843
- models = AVAILABLE_ISSUE_MODELS_BY_PROVIDER.get(provider, [])
844
- return gr.update(choices=models, value=models[0] if models else None)
845
 
846
- llm_issue_provider_dropdown.change(
847
- fn=update_model_choices,
848
- inputs=[llm_issue_provider_dropdown],
849
- outputs=[llm_issue_model_dropdown],
850
  show_api=False
851
  )
852
  issue_analyze_button.click(
853
  fn=analyze_github_issue,
854
- inputs=[issue_repo_url, issue_number_input, llm_issue_provider_dropdown, llm_issue_model_dropdown, issue_github_token],
855
  outputs=[issue_report_output],
856
  api_name="analyze_github_issue",
857
  )
858
- # Tool 3: Issue Duplicate Finder
859
- dup_llm_provider.change(fn=update_model_choices,
860
- inputs=[dup_llm_provider],
861
- outputs=[dup_llm_model],
862
- show_api=False
863
- )
864
  dup_find_button.click(
865
  fn=find_duplicate_issues,
866
- inputs=[dup_repo_url, dup_issue_number, dup_llm_provider, dup_llm_model, dup_github_token],
867
  outputs=[dup_report_output],
868
  api_name="find_duplicate_issues",
869
  )
870
- # Tool 4: Issue Prioritizer
871
- prio_llm_provider.change(
872
- fn=update_model_choices,
873
- inputs=[prio_llm_provider],
874
- outputs=[prio_llm_model],
875
- show_api=False
876
- )
877
  prio_run_button.click(
878
  fn=prioritize_open_issues,
879
- inputs=[prio_repo_url, prio_llm_provider, prio_llm_model],
880
  outputs=[prio_report_output],
881
  api_name="prioritize_open_issues"
882
  )
883
 
884
- # region BONUS 1: Theme Generator
885
- llm_theme_provider_dropdown.change(
886
- fn=update_model_choices,
887
- inputs=[llm_theme_provider_dropdown],
888
- outputs=[llm_theme_model_dropdown],
889
- show_api=False
890
  )
 
 
891
  theme_generate_button.click(
892
  fn=generate_theme,
893
- inputs=[theme_prompt_input, llm_theme_provider_dropdown, llm_theme_model_dropdown],
894
  outputs=[generated_theme_state, python_code_output, status_output, css_data_transport],
895
  ).then(
896
  fn=None,
@@ -898,12 +1107,25 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
898
  outputs=None,
899
  js="(css) => { const action = () => apply_gradio_theme(css); if (typeof apply_gradio_theme === 'function') { action(); } else { document.addEventListener('theme-helper-injected', action, { once: true }); } }",
900
  )
 
901
 
902
- upload_button.click(fn=handle_upload,
903
- inputs=[generated_theme_state, hub_theme_name, hub_hf_token],
904
- outputs=[upload_status]
905
  )
906
-
907
  def add_message_to_chat(history, message):
908
  """
909
  A simple function to add a user message to the chat history
@@ -920,53 +1142,15 @@ with gr.Blocks(css=css, title="GitRepo Inspector (MCP Server)") as app:
920
  time.sleep(0.5)
921
  history.append({"role": "assistant", "content": "Thank you for your message!"})
922
  return history
923
-
924
- add_msg_btn_comp.click(
925
- fn=add_message_to_chat,
926
- inputs=[chatbot_comp, msg_input_comp],
927
- outputs=[chatbot_comp],
928
- show_api=False
929
- ).then(
930
- fn=lambda: "",
931
- outputs=[msg_input_comp],
932
- show_api=False
933
- )
934
- # endregion
935
-
936
- # region BONUS 2 : Sketch to UI
937
- def update_sketch_model_choices(provider):
938
- """
939
- Updates the sketch-to-ui model dropdown choices when the provider changes.
940
- """
941
- models = AVAILABLE_SKETCH_MODELS_BY_PROVIDER.get(provider, [])
942
- return gr.update(choices=models, value=models[0] if models else None)
943
-
944
- llm_sketch_provider_dropdown.change(fn=update_sketch_model_choices, inputs=[llm_sketch_provider_dropdown], outputs=[llm_sketch_model_dropdown])
945
-
946
- sketch_generate_button.click(
947
- fn=generate_ui_from_sketch,
948
- inputs=[sketch_input, text_desc_input, llm_sketch_provider_dropdown, llm_sketch_model_dropdown],
949
- outputs=[sketch_code_output],
950
- )
951
- # endregion
952
- app.load(
953
- fn=initialize_database,
954
- inputs=None,
955
- outputs=None,
956
- show_api=False
957
- ).then(
958
- fn=inject_theme_helper_js,
959
- inputs=None,
960
- outputs=[html_injector],
961
- show_api=False
962
- )
963
 
964
  # endregion
965
  if __name__ == "__main__":
966
- check_env()
967
-
968
  app.allowed_paths = ["."]
969
- app.launch(
970
- mcp_server=True,
971
- server_port=7860,
972
- )
 
17
  SYSTEM_PROMPT_DUPLICATE_FINDER_EN,
18
  SYSTEM_PROMPT_ISSUE_ANALYZER_EN,
19
  SYSTEM_PROMPT_PRIORITIZER_EN,
20
+ SYSTEM_PROMPT_SKETCH_EN,
21
+ SYSTEM_PROMPT_THEME_GENERATOR_EN,
22
  )
23
  from config.database import connect, initialize_database
24
  from config.model_factory import LLMFactory
 
30
 
31
 
32
  # region TOOLS FUNCTIONS
33
+ def get_available_models():
34
+ """
35
+ Returns a JSON object listing all available LLM providers and their models.
36
+ Useful for agents to dynamically select valid parameters.
37
+ """
38
+ return {
39
+ "text_models": AVAILABLE_ISSUE_MODELS_BY_PROVIDER,
40
+ "vision_models": AVAILABLE_SKETCH_MODELS_BY_PROVIDER,
41
+ "providers": list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys())
42
+ }
43
+
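Because these functions double as MCP tools, an agent can query the registry above before choosing a provider/model pair. A minimal client-side sketch using `gradio_client` (the endpoint names are assumptions based on the `api_name` values used in the event bindings; adjust the URL to the actual deployment):

```python
# Client-side sketch (not part of app.py): discover the model registry,
# then trigger a repository sync. Endpoint names are assumed from the
# api_name values used when the tools are registered.
from gradio_client import Client

client = Client("http://localhost:7860")  # or the deployed Space URL

registry = client.predict(api_name="/get_available_models")
print(registry["providers"])    # e.g. ["gemini", "sambanova"]
print(registry["text_models"])  # provider -> list of text/chat models

status = client.predict(
    "https://github.com/gradio-app/gradio",  # repo_url
    "",                                       # github_token (optional)
    api_name="/sync_repository",
)
print(status)
```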
44
  def fetch_all_pages(url: str, headers: dict, progress: gr.Progress | None = None, desc: str = ""):
45
  """Helper function to fetch all pages from a paginated GitHub API endpoint."""
46
  results = []
 
70
  def sync_repository(repo_url: str, github_token: str | None = None, progress=gr.Progress(track_tqdm=True)):
71
  """
72
  Performs a lightweight sync of a GitHub repository with the local database.
73
+
 
74
  Args:
75
+ repo_url (str): The full URL of the GitHub repository (e.g., 'https://github.com/gradio-app/gradio').
76
+ github_token (str, optional): A GitHub Personal Access Token. Optional. If not provided, the tool will work but may hit public API rate limits.
77
+
 
78
  Returns:
79
+ str: A status message indicating completion.
80
  """
81
  if not repo_url:
82
  raise gr.Error("Please provide a repository URL.")
 
93
  open_items = fetch_all_pages(issues_url, headers)
94
  for item in progress.tqdm(open_items, desc="Syncing Open Issues"):
95
  if "pull_request" in item:
96
+ continue
97
  item_labels = json.dumps([label["name"] for label in item["labels"]])
98
 
99
  cursor.execute(
 
118
  item.get("body"),
119
  item["state"],
120
  item["user"]["login"],
121
+ item_labels,
122
  item["comments"],
123
  item.get("reactions", {}).get("total_count", 0),
124
  item["created_at"],
 
127
  ),
128
  )
129
 
130
+ # 2. Sync ALL releases
 
 
 
 
 
 
 
 
 
 
 
131
  releases_url = f"https://api.github.com/repos/{owner}/{repo}/releases?per_page=100"
132
  all_releases = fetch_all_pages(releases_url, headers, progress, desc="Syncing All Releases")
133
  for release in progress.tqdm(all_releases, desc="Saving Releases"):
 
145
 
146
  except Exception as e:
147
  import traceback
 
148
  traceback.print_exc()
149
+
150
+ error_msg = str(e)
151
+ if "429" in error_msg or "Rate" in error_msg:
152
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
153
+ else:
154
+ detailed_msg = f"System Error: {error_msg}"
155
+
156
+ raise gr.Error(detailed_msg)
157
 
158
 
159
  def analyze_github_issue(
160
+ repo_url: str,
161
+ issue_number: str,
162
+ llm_provider: str = "gemini",
163
+ llm_model: str = "gemini-2.0-flash",
164
+ github_token: str | None = None,
165
+ llm_api_key: str | None = None,
166
+ request: gr.Request = None,
167
+ progress=gr.Progress(track_tqdm=True)
168
  ):
169
  """
170
+ Analyzes a single GitHub issue to determine its resolution status by checking PRs and Releases.
 
171
 
172
  Args:
173
+ repo_url (str): The full URL of the GitHub repository.
174
+ issue_number (str): The issue number to analyze (e.g., '1234').
175
+ llm_provider (str, optional): The LLM provider to use. Defaults to 'gemini'.
176
+ llm_model (str, optional): The specific model name. Defaults to 'gemini-2.0-flash'.
177
+ github_token (str, optional): GitHub Token. Optional. Recommended for higher rate limits.
178
+ llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided (empty string), the server will use its internal environment keys to perform the analysis.
179
+
180
  Returns:
181
+ str: An HTML-formatted analysis report.
182
  """
183
  try:
184
+ _validate_api_keys(llm_api_key, request)
185
+
186
  if not repo_url or not issue_number:
187
  return "## Error\nPlease provide both a repository URL and an issue number."
188
 
 
213
  pr_num = event["source"]["issue"]["number"]
214
  pr_urls_to_fetch.append(f"https://api.github.com/repos/{repo_slug}/pulls/{pr_num}")
215
 
 
216
  for i, url in enumerate(pr_urls_to_fetch):
217
  progress(0.4 + (0.2 * (i / len(pr_urls_to_fetch))) if pr_urls_to_fetch else 0.4, desc=f"Fetching details for PR {i + 1}/{len(pr_urls_to_fetch)}...")
218
  pr_res = requests.get(url, headers=headers)
 
221
 
222
  # 2. GET RELEASE DATA FROM CACHE
223
  progress(0.6, desc="Querying local release cache...")
224
+ cursor.execute("SELECT * FROM releases WHERE repo = %s ORDER BY published_at DESC LIMIT 50", (repo_slug,))
225
+ releases = cursor.fetchall()
226
  conn.close()
227
 
228
  # 3. SUMMARIZE COMMENTS
 
233
  try:
234
  summarizer_prompt = SYSTEM_PROMPT_COMMENT_SUMMARIZER_EN.format(comments_text=comments_text)
235
  additional_problems_summary = LLMFactory.call(
236
+ llm_provider,
237
+ llm_model,
238
+ messages=[{"role": "user", "content": summarizer_prompt}],
239
+ temperature=0.0,
240
+ api_key=llm_api_key
241
  )
242
  except Exception:
243
  additional_problems_summary = "Could not summarize comments due to an error."
244
 
245
+ # 4. FORMAT CONTEXT
246
  progress(0.8, desc="Preparing final analysis...")
247
 
248
  pull_requests_summary = (
 
258
 
259
  release_notes_summary = (
260
  "\n\n".join(
261
+ [f"- **Release {r['tag_name']} ({str(r['published_at'])[:10]}):**\n{str(r['body'])[:500] if r['body'] else 'No description.'}..." for r in releases]
262
  )
263
  if releases
264
  else "No releases found in local cache."
 
280
  progress(0.9, desc="Generating final report with AI...")
281
 
282
  messages = [{"role": "user", "content": final_prompt}]
283
+ raw_output = LLMFactory.call(
284
+ llm_provider,
285
+ llm_model,
286
+ messages=messages,
287
+ temperature=0.1,
288
+ max_tokens=2048,
289
+ api_key=llm_api_key
290
+ )
291
+
292
  report_start_marker = f"## Analysis of Issue #{issue_number}"
293
  report_start_index = raw_output.find(report_start_marker)
294
 
 
311
  </span>
312
  </div>
313
  """
 
 
314
  analysis_html = markdown.markdown(analysis.strip())
315
+ return provider_seal_html + analysis_html
316
 
 
 
 
317
  except Exception as e:
318
  import traceback
 
319
  traceback.print_exc()
320
+
321
+ error_msg = str(e)
322
+ if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
323
+ detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
324
+ elif "429" in error_msg or "Rate" in error_msg:
325
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
326
+ else:
327
+ detailed_msg = f"System Error: {error_msg}"
328
+
329
+ raise gr.Error(detailed_msg)
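The Auth / rate-limit / generic classification above is repeated verbatim in every tool's `except` block. A hypothetical shared helper (not part of this commit) would keep the wording in one place:

```python
# Hypothetical helper (not in this commit): map a raw exception to the same
# user-facing buckets used by the handlers above, then raise gr.Error.
import traceback

import gradio as gr


def _raise_user_error(e: Exception, llm_provider: str) -> None:
    traceback.print_exc()
    msg = str(e)
    if "Auth" in msg or "Key" in msg or "401" in msg:
        detail = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
    elif "429" in msg or "Rate" in msg:
        detail = "Rate Limit Error: The AI provider is busy. Please wait a moment."
    else:
        detail = f"System Error: {msg}"
    raise gr.Error(detail)


# Usage inside a tool:
#     except Exception as e:
#         _raise_user_error(e, llm_provider)
```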
330
+
331
+
332
+ def find_duplicate_issues(
333
+ repo_url: str,
334
+ issue_number: str,
335
+ llm_provider: str = "gemini",
336
+ llm_model: str = "gemini-2.0-flash",
337
+ github_token: str | None = None,
338
+ llm_api_key: str | None = None,
339
+ request: gr.Request = None,
340
+ progress=gr.Progress()
341
+ ):
342
  """
343
+ Finds potential duplicate issues for a given issue using mentions and keyword search.
344
+
 
 
345
  Args:
346
+ repo_url (str): The full URL of the GitHub repository.
347
+ issue_number (str): The main issue number to check.
348
+ llm_provider (str, optional): The LLM provider. Defaults to 'gemini'.
349
+ llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
350
+ github_token (str, optional): GitHub Token. Optional.
351
+ llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
352
+
353
  Returns:
354
+ str: A Markdown list of potential duplicates.
355
  """
356
+ _validate_api_keys(llm_api_key, request)
357
+
358
  if not repo_url or not issue_number:
359
  return "Please provide the repository and the main issue number."
360
 
 
366
  cursor = conn.cursor()
367
 
368
  progress(0, desc="Fetching main issue from cache...")
369
+ cursor.execute("SELECT * FROM items WHERE repo = %s AND number = %s", (repo_slug, issue_number))
370
+ main_issue = cursor.fetchone()
371
+
372
  if not main_issue:
373
  conn.close()
374
  return "Main issue not found in cache. Please synchronize the repository first."
375
 
376
+ # STEP 1: GATHER MENTIONS
377
  progress(0.2, desc="Fetching timeline for mentions...")
378
  timeline_api_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/timeline?per_page=100"
379
  timeline_events = fetch_all_pages(timeline_api_url, headers)
 
389
  if not mentions_summary:
390
  mentions_summary = "No other issues or PRs were found mentioning this issue in its timeline."
391
 
392
+ # STEP 2: SEARCH CANDIDATES
393
  progress(0.5, desc="Searching for candidates in cache...")
394
  main_issue_title = main_issue["title"]
395
  keywords = [word for word in re.findall(r"\b\w+\b", main_issue_title) if len(word) > 3]
396
  query_conditions = " OR ".join([f"title LIKE '%{keyword}%'" for keyword in keywords])
397
+ candidate_query = f"SELECT * FROM items WHERE repo = '{repo_slug}' AND is_pr = FALSE AND state = 'open' AND ({query_conditions})"
398
+ cursor.execute(candidate_query)
399
+ candidate_issues = cursor.fetchall()
400
  conn.close()
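Note that `candidate_query` interpolates the title keywords directly into the SQL text. Because the keywords come from a GitHub issue title, a parameterized variant is safer; a sketch reusing the `%s` placeholder style of the other queries in this file (`keywords`, `repo_slug`, and `cursor` are the in-scope names from the function above):

```python
# Parameterized sketch of the keyword search above (assumption: the driver
# accepts %s placeholders, as the other queries in this file suggest).
conditions = " OR ".join(["title LIKE %s"] * len(keywords))
params = [repo_slug, *[f"%{kw}%" for kw in keywords]]
cursor.execute(
    "SELECT * FROM items WHERE repo = %s AND is_pr = FALSE "
    f"AND state = 'open' AND ({conditions})",
    params,
)
candidate_issues = cursor.fetchall()
```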
401
 
402
  candidate_issues_summary = ""
 
409
  if not candidate_issues_summary:
410
  candidate_issues_summary = "No similar open issues found via keyword search."
411
 
412
+ # STEP 3: LLM ANALYSIS
413
  progress(0.8, desc="Analyzing for duplicates with AI...")
414
  prompt_context = {
415
  "main_issue_number": main_issue["number"],
 
421
  final_prompt = SYSTEM_PROMPT_DUPLICATE_FINDER_EN.format(**prompt_context)
422
 
423
  messages = [{"role": "user", "content": final_prompt}]
424
+ analysis = LLMFactory.call(
425
+ llm_provider,
426
+ llm_model,
427
+ messages=messages,
428
+ temperature=0.0,
429
+ api_key=llm_api_key
430
+ )
431
  return analysis
432
 
433
  except Exception as e:
434
  import traceback
 
435
  traceback.print_exc()
436
+
437
+ error_msg = str(e)
438
+ if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
439
+ detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
440
+ elif "429" in error_msg or "Rate" in error_msg:
441
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
442
+ else:
443
+ detailed_msg = f"System Error: {error_msg}"
444
+
445
+ raise gr.Error(detailed_msg)
446
+
447
+ def prioritize_open_issues(
448
+ repo_url: str,
449
+ llm_provider: str = "gemini",
450
+ llm_model: str = "gemini-2.0-flash",
451
+ llm_api_key: str | None = None,
452
+ request: gr.Request = None,
453
+ progress=gr.Progress(track_tqdm=True)
454
+ ):
455
  """
456
+ Analyzes open issues from the cache to create a prioritized backlog.
457
+
 
458
  Args:
459
  repo_url (str): The URL of the GitHub repository.
460
+ llm_provider (str, optional): The LLM provider. Defaults to 'gemini'.
461
+ llm_model (str, optional): The model name. Defaults to 'gemini-2.0-flash'.
462
+ llm_api_key (str, optional): API Key for the LLM. **OPTIONAL**. If not provided, the server uses its own keys.
463
+
464
  Returns:
465
+ str: A Markdown priority list.
466
  """
467
+ _validate_api_keys(llm_api_key, request)
468
+
469
  if not repo_url:
470
  return "Please provide the repository URL."
471
 
 
474
  conn = connect()
475
  cursor = conn.cursor()
476
 
 
477
  progress(0, desc="Fetching open issues from cache...")
478
+ cursor.execute(
479
+ "SELECT * FROM items WHERE repo = %s AND is_pr = FALSE AND state = 'open' ORDER BY comments DESC, reactions DESC LIMIT 50", (repo_slug,)
480
+ )
481
+ open_issues = cursor.fetchall()
482
  conn.close()
483
 
484
  if not open_issues:
485
  return "No open issues found in the cache to prioritize."
486
 
 
487
  progress(0.5, desc="Preparing context for prioritization...")
488
  issues_list_summary = ""
489
  for issue in open_issues:
490
+ labels = issue["labels"] if issue["labels"] else "None"
491
  issues_list_summary += (
492
  f'- **Issue #{issue["number"]}**: "{issue["title"]}"\n'
493
  f" - **Labels**: {labels}\n"
 
497
  prompt_context = {"issues_list_summary": issues_list_summary}
498
  final_prompt = SYSTEM_PROMPT_PRIORITIZER_EN.format(**prompt_context)
499
 
 
500
  progress(0.8, desc="Generating priority list with AI...")
501
  messages = [{"role": "user", "content": final_prompt}]
502
+ analysis = LLMFactory.call(
503
+ llm_provider,
504
+ llm_model,
505
+ messages=messages,
506
+ temperature=0.1,
507
+ max_tokens=4096,
508
+ api_key=llm_api_key
509
+ )
510
  return analysis
511
 
512
  except Exception as e:
513
  import traceback
 
514
  traceback.print_exc()
515
+
516
+ error_msg = str(e)
517
+ if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
518
+ detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
519
+ elif "429" in error_msg or "Rate" in error_msg:
520
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
521
+ else:
522
+ detailed_msg = f"System Error: {error_msg}"
523
+
524
+ raise gr.Error(detailed_msg)
525
+
526
+ def reply_and_close_issue(
527
+ repo_url: str,
528
+ issue_number: str,
529
+ comment_body: str,
530
+ close_issue: bool = False,
531
+ github_token: str | None = None,
532
+ request: gr.Request = None
533
+ ):
534
+ """
535
+ Posts a comment on a GitHub issue and optionally closes it.
536
+ Crucial for the 'Actionable AI' capability.
537
+
538
+ Args:
539
+ repo_url (str): Full repository URL.
540
+ issue_number (str): The issue number.
541
+ comment_body (str): The markdown text to post as a comment.
542
+ close_issue (bool): If True, changes the issue state to 'closed'.
543
+ github_token (str): MANDATORY. A GitHub token with write permissions.
544
+ """
545
+ # Security check: This tool ALWAYS requires a token via API or UI
546
+ if not github_token:
547
+ raise gr.Error("⚠️ Write Permission Error: A GitHub Token is mandatory for posting comments or closing issues.")
548
 
549
+ try:
550
+ owner, repo = repo_url.strip().replace("https://github.com/", "").split("/")
551
+ repo_slug = f"{owner}/{repo}"
552
+ headers = {
553
+ "Authorization": f"token {github_token}",
554
+ "Accept": "application/vnd.github.v3+json"
555
+ }
556
 
557
+ # 1. Post the Comment
558
+ if comment_body and comment_body.strip():
559
+ comment_url = f"https://api.github.com/repos/{repo_slug}/issues/{issue_number}/comments"
560
+ comment_resp = requests.post(comment_url, headers=headers, json={"body": comment_body})
561
+ comment_resp.raise_for_status()
562
+ action_log = f"✅ Comment posted on issue #{issue_number}."
563
+ else:
564
+ action_log = "ℹ️ No comment body provided, skipping comment."
565
+
566
+ # 2. Close the Issue (if requested)
567
+ if close_issue:
568
+ issue_url = f"https://api.github.com/repos/{repo_slug}/issues/{issue_number}"
569
+ # state_reason can be 'completed' or 'not_planned'
570
+ close_resp = requests.patch(issue_url, headers=headers, json={"state": "closed", "state_reason": "completed"})
571
+ close_resp.raise_for_status()
572
+ action_log += f"\n🔒 Issue #{issue_number} has been CLOSED."
573
+
574
+ return f"## Success\n{action_log}"
575
+
576
+ except requests.exceptions.HTTPError as e:
577
+ if e.response.status_code == 404:
578
+ raise gr.Error("Error 404: Repo or Issue not found (or Token lacks permission).")
579
+ if e.response.status_code == 401:
580
+ raise gr.Error("Error 401: Invalid GitHub Token.")
581
+ raise gr.Error(f"GitHub API Error: {e}")
582
+ except Exception as e:
583
+ import traceback
584
+ traceback.print_exc()
585
+ raise gr.Error(f"System Error: {str(e)}")
586
+
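For reference, the tool above wraps two standard GitHub REST calls; shown standalone below (token, owner, repo, and issue number are placeholders):

```python
# Standalone view of the two GitHub REST calls wrapped by
# reply_and_close_issue. TOKEN, OWNER, REPO and ISSUE are placeholders.
import requests

TOKEN, OWNER, REPO, ISSUE = "ghp_xxx", "gradio-app", "gradio", "1234"
headers = {"Authorization": f"token {TOKEN}", "Accept": "application/vnd.github.v3+json"}
issue_url = f"https://api.github.com/repos/{OWNER}/{REPO}/issues/{ISSUE}"

# 1. Post a Markdown comment on the issue.
requests.post(f"{issue_url}/comments", headers=headers,
              json={"body": "Resolved in the latest release, closing."}).raise_for_status()

# 2. Close the issue; state_reason may be 'completed' or 'not_planned'.
requests.patch(issue_url, headers=headers,
               json={"state": "closed", "state_reason": "completed"}).raise_for_status()
```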
587
+ def generate_theme(
588
+ prompt: str,
589
+ llm_provider: str = "gemini",
590
+ llm_model: str = "gemini-2.0-flash",
591
+ llm_api_key: str | None = None,
592
+ request: gr.Request = None
593
+ ):
594
  """
595
+ Generates a Gradio theme based on a text prompt.
596
+
597
+ Args:
598
+ prompt (str): Description of the desired theme.
599
+ llm_provider (str, optional): LLM provider. Defaults to 'gemini'.
600
+ llm_model (str, optional): Model name. Defaults to 'gemini-2.0-flash'.
601
+ llm_api_key (str, optional): API Key. **OPTIONAL**. If empty, uses server keys.
602
  """
603
+ _validate_api_keys(llm_api_key, request)
604
+
605
  try:
606
+ messages = [{"role": "system", "content": SYSTEM_PROMPT_THEME_GENERATOR_EN}, {"role": "user", "content": prompt}]
607
+ llm_output = LLMFactory.call(llm_provider=llm_provider, llm_model=llm_model, messages=messages, temperature=0.1, max_tokens=2048, api_key=llm_api_key)
608
 
609
  if isinstance(llm_output, str) and llm_output.startswith("Error:"):
610
  raise Exception(llm_output)
 
615
  constructor_kwargs = theme_config.constructor_args.model_dump(exclude_unset=True)
616
  set_kwargs = theme_config.set_args.model_dump(exclude_unset=True)
617
 
618
+ # Clean up color refs
619
  for key in list(set_kwargs.keys()):
620
  value = set_kwargs[key]
621
  if isinstance(value, str) and value.startswith("*"):
 
623
  name, shade = value[1:].rsplit("_", 1)
624
  shade_num = int(shade)
625
  if not (50 <= shade_num <= 950):
 
626
  del set_kwargs[key]
627
  except (ValueError, IndexError):
 
628
  del set_kwargs[key]
629
 
630
  if "font" in constructor_kwargs:
 
644
  css_str = theme_object._get_theme_css()
645
 
646
  def format_arg(val):
647
+ if isinstance(val, str): return f"'{val}'"
 
648
  if isinstance(val, list):
649
  font_list = [f"gr.themes.GoogleFont('{f.name}')" if isinstance(f, gr.themes.GoogleFont) else f"'{f}'" for f in val]
650
  return f"[{', '.join(font_list)}]"
 
658
  return theme_object, py_code, status_message, css_str
659
 
660
  except Exception as e:
661
+ import traceback
662
+ traceback.print_exc()
663
+
664
+ error_msg = str(e)
665
+ if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
666
+ detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
667
+ elif "429" in error_msg or "Rate" in error_msg:
668
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
669
+ else:
670
+ detailed_msg = f"System Error: {error_msg}"
671
+
672
+ raise gr.Error(detailed_msg)
673
 
674
 
675
  def handle_upload(theme_obj, theme_name, hf_token):
676
  """
677
+ Handles uploading the generated theme.
678
  """
679
  if not isinstance(theme_obj, gr.themes.Base):
680
  return gr.Markdown("⚠️ Please generate a theme first.", visible=True)
 
689
  return gr.Markdown(f"❌ **Error:** {e}", visible=True)
690
 
691
 
692
+ def generate_ui_from_sketch(
693
+ sketch_image,
694
+ text_description: str,
695
+ llm_provider: str = "sambanova",
696
+ llm_model: str = "Llama-4-Maverick-17B-128E-Instruct",
697
+ llm_api_key: str | None = None,
698
+ request: gr.Request = None
699
+ ):
700
  """
701
+ Generates Python code for a Gradio UI from a sketch image.
702
+
703
  Args:
704
+ sketch_image (Image): The input sketch image.
705
+ text_description (str): Optional text description.
706
+ llm_provider (str, optional): Defaults to 'sambanova'.
707
+ llm_model (str, optional): Defaults to 'Llama-4-Maverick-17B-128E-Instruct'.
708
+ llm_api_key (str, optional): API Key. **OPTIONAL**. If empty, uses server keys.
 
 
709
  """
710
+ _validate_api_keys(llm_api_key, request)
711
+
712
  if sketch_image is None:
713
  return "# Please draw or upload an image."
714
 
715
  try:
716
  messages = [
717
+ {"role": "system", "content": SYSTEM_PROMPT_SKETCH_EN},
718
  {"role": "user", "content": f"Additional text description: {text_description if text_description else 'None'}"},
719
  ]
720
 
721
+ llm_output = LLMFactory.call(llm_provider=llm_provider, llm_model=llm_model, messages=messages, image=sketch_image, temperature=0.1, max_tokens=8192, api_key=llm_api_key)
722
 
723
  if isinstance(llm_output, str) and llm_output.startswith("Error:"):
724
  raise Exception(llm_output)
725
 
726
  code = llm_output.split("```python\n")[1].split("\n```")[0] if "```python" in llm_output else llm_output
 
727
  return code.strip()
728
  except Exception as e:
729
+ import traceback
730
+ traceback.print_exc()
731
+
732
+ error_msg = str(e)
733
+ if "Auth" in error_msg or "Key" in error_msg or "401" in error_msg:
734
+ detailed_msg = f"Authentication Error: The provided API Key for {llm_provider} is invalid or expired."
735
+ elif "429" in error_msg or "Rate" in error_msg:
736
+ detailed_msg = f"Rate Limit Error: The AI provider is busy. Please wait a moment."
737
+ else:
738
+ detailed_msg = f"System Error: {error_msg}"
739
+
740
+ raise gr.Error(detailed_msg)
741
 
742
 
743
  # endregion
744
 
745
 
746
  # region HELPER FUNCTIONS
747
+ def _validate_api_keys(llm_api_key, request):
748
+ USE_SERVER_KEYS = os.getenv("USE_SERVER_KEYS", "false").lower() in ("true", "1", "yes")
749
+ if not USE_SERVER_KEYS and request and request.headers.get("referer"):
750
+ if not llm_api_key or not llm_api_key.strip():
751
+ raise gr.Error("⚠️ LLM API Key Required! Please enter your own API Key to use this tool in the demo UI.")
752
+
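The gate only fires for browser traffic: `request.headers.get("referer")` is present when the call comes from the demo UI, while MCP/API calls without a referer, or deployments with `USE_SERVER_KEYS=true`, fall through to the server-side keys. A small illustrative check (the `FakeRequest` stand-in is not part of the app):

```python
# Illustrative check of _validate_api_keys behavior; FakeRequest is a
# stand-in for gr.Request and is not part of the app.
import os


class FakeRequest:
    def __init__(self, referer=None):
        self.headers = {"referer": referer} if referer else {}


os.environ["USE_SERVER_KEYS"] = "false"

# UI call (referer present) without a key -> gr.Error is raised.
try:
    _validate_api_keys("", FakeRequest(referer="https://example.com/"))
except Exception as err:
    print("blocked:", err)

# API/MCP call (no referer) without a key -> allowed, server keys are used.
_validate_api_keys("", FakeRequest())
```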
753
+ def _check_env():
754
  if not os.getenv("GOOGLE_API_KEY"):
755
  print("WARNING: The GOOGLE_API_KEY environment variable was not set.")
756
  if not os.getenv("SAMBANOVA_API_KEY"):
757
  print("WARNING: The SAMBANOVA_API_KEY environment variable was not set.")
758
 
759
+ def _inject_theme_helper_js():
760
  js_code = """
761
  function apply_gradio_theme(css) {
762
  let style_tag = document.getElementById('dynamic_theme_css');
 
766
  document.head.appendChild(style_tag);
767
  }
768
  style_tag.innerHTML = css;
 
769
  }
770
  document.dispatchEvent(new CustomEvent('theme-helper-injected'));
 
771
  """
772
  return {"js": js_code}
773
 
774
+ def _add_interactive_inputs():
775
  with gr.Row():
776
  with gr.Column(variant="panel", scale=1):
777
  gr.Markdown("### Interactive Controls")
 
788
  gr.Button("Small", size="sm")
789
  gr.UploadButton(size="sm")
790
  gr.Button("Stop", variant="stop", size="sm")
 
791
  return radio, drop, drop_2, check
792
 
793
+ def _add_data_display_components():
794
  with gr.Row():
795
  gr.Dataframe(value=[[1, 2], [3, 4]], label="Dataframe"), gr.JSON(value={"a": 1}), gr.Label(value={"cat": 0.7}), gr.File()
796
 
797
+ def _add_media_components():
798
  with gr.Row():
799
  (
800
  gr.ColorPicker(label="Color Picker"),
 
802
  gr.Gallery([("https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg", "lion")], height="200px"),
803
  )
804
 
805
+ def _add_chatbot_component():
806
  with gr.Row():
807
  with gr.Column(scale=2):
808
  chatbot = gr.Chatbot([{"role": "user", "content": "Hello"}], label="Chatbot", type="messages")
 
813
  gr.Slider(label="Temperature"), gr.Checkbox(label="Streaming")
814
  return chatbot, msg_input, add_msg_btn
815
 
816
+ def _create_example_app():
817
  with gr.Column(scale=3, elem_id="app"):
818
  with gr.Tabs():
819
  with gr.TabItem("Common Inputs"):
 
823
  gr.Slider(label="Slider 1")
824
  gr.Slider(label="Slider 2")
825
  gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group")
826
+ radio, drop, drop_2, check = _add_interactive_inputs()
827
  gr.Examples(
828
  examples=[["A", "Option 1", ["B"], True], ["B", "Option 2", ["A", "C"], False]], inputs=[radio, drop, drop_2, check], label="Input Examples"
829
  )
830
  with gr.TabItem("Data and Media"):
831
+ _add_data_display_components()
832
+ _add_media_components()
 
833
  with gr.TabItem("Layout and Chat"):
834
+ chatbot, msg_input, add_msg_btn = _add_chatbot_component()
 
835
  return chatbot, msg_input, add_msg_btn
836
 
837
+ def _create_gradio_lite_html(python_code: str) -> str:
838
+ """
839
+ Wraps the Python code in a Gradio-Lite HTML structure with an iframe.
840
+ This ensures the preview runs isolated from the main app.
841
+ """
842
+ # Escape angle brackets so the embedded code cannot inject HTML/script tags inside srcdoc
843
+ safe_code = python_code.replace("<", "&lt;").replace(">", "&gt;")
844
+
845
+ # Ensure demo.launch() is present so Gradio-Lite can render the app
846
+ if "demo.launch()" not in safe_code:
847
+ safe_code += "\n\ndemo.launch()"
848
+
849
+ html_template = f"""
850
+ <div style="width: 100%; height: 600px; border: 1px solid #e5e7eb; border-radius: 8px; overflow: hidden;">
851
+ <iframe
852
+ srcdoc='
853
+ <!DOCTYPE html>
854
+ <html>
855
+ <head>
856
+ <script type="module" crossorigin src="https://gradio-lite-previews.s3.amazonaws.com/PINNED_HF_HUB/dist/lite.js"></script>
857
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
858
+ <style>
859
+ body {{ margin: 0; padding: 0; height: 100vh; }}
860
+ gradio-lite {{ width: 100%; height: 100%; display: block; }}
861
+ </style>
862
+ </head>
863
+ <body>
864
+ <gradio-lite>
865
+ {safe_code}
866
+ </gradio-lite>
867
+ </body>
868
+ </html>'
869
+ width="100%"
870
+ height="100%"
871
+ style="border: none;"
872
+ sandbox="allow-scripts allow-same-origin"
873
+ ></iframe>
874
+ </div>
875
+ """
876
+ return html_template
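A hedged usage sketch of the wrapper above (the code string is illustrative, not actual model output):

```python
# Hedged sketch: wrap a generated snippet and feed it to the preview gr.HTML component.
sample_code = 'import gradio as gr\nwith gr.Blocks() as demo:\n    gr.Markdown("Hello from the sketch tool")'
preview_html = _create_gradio_lite_html(sample_code)  # appends demo.launch() and embeds the app in an iframe
# e.g. sketch_preview_output = gr.HTML(value=preview_html)
```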
877
  # endregion
878
 
879
  # region MAIN LAYOUT DEFINITION
880
  css = """
881
  .gradio-container { max-width: none !important; }
882
+ .fillable { width: 100% !important; max-width: unset !important; }
 
 
 
883
  #app { height: 100vh; overflow-y: scroll; }
884
  """
885
 
 
890
 
891
  gr.Markdown("# 🤖 GitRepo Inspector (MCP Server) 🤖")
892
 
893
+ # --- GLOBAL CONFIGURATION ---
894
+ with gr.Accordion("⚙️ Global Configuration (Configure tools here)", open=True):
895
+ with gr.Row():
896
+ global_github_token = gr.Textbox(
897
+ label="GitHub Token",
898
+ type="password",
899
+ placeholder="Optional for public repos, required for higher rate limits."
900
+ )
901
+ global_api_key = gr.Textbox(
902
+ label="LLM API Key",
903
+ type="password",
904
+ placeholder="Required for UI demo. (Matches selected Provider)"
905
+ )
906
+ with gr.Row():
907
+ global_provider = gr.Dropdown(
908
+ choices=list(AVAILABLE_ISSUE_MODELS_BY_PROVIDER.keys()),
909
+ value="gemini",
910
+ label="LLM Provider (For all tools)"
911
+ )
912
+ global_model = gr.Dropdown(
913
+ choices=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"],
914
+ value=AVAILABLE_ISSUE_MODELS_BY_PROVIDER["gemini"][0],
915
+ label="Main Model (Text/Chat)",
916
+ interactive=True
917
+ )
918
+ with gr.Row():
919
+ models_info_output = gr.JSON(label="Available Models Registry", visible=False)
920
+ get_models_btn = gr.Button("Refresh Models Info", visible=False)
921
+ # -----------------------------
922
+
923
  with gr.Tabs():
924
  with gr.TabItem("1. Sync Repo"):
925
  gr.Markdown("### 🔄 Synchronize Repository with Local Cache")
926
+ gr.Markdown("Fetch open issues and recent releases to speed up analysis.")
 
 
927
  sync_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
 
928
  sync_button = gr.Button("Start Synchronization", variant="primary")
929
  sync_status_output = gr.Textbox(label="Sync Status", interactive=False)
930
 
931
  with gr.TabItem("2. Issue Analyzer"):
932
  gr.Markdown("### 🔍 Analyze a GitHub Issue")
 
 
 
933
  with gr.Row():
934
+ issue_repo_url = gr.Textbox(label="GitHub Repository URL", value="https://github.com/gradio-app/gradio")
 
 
 
 
 
 
 
935
  issue_number_input = gr.Number(label="Issue Number", precision=0)
 
 
936
  issue_analyze_button = gr.Button("Analyze Issue 🕵️", variant="primary")
937
  issue_report_output = gr.HTML(label="Analysis Report")
938
 
939
  with gr.TabItem("3. Issue Duplicate Finder"):
940
  gr.Markdown("### 👯 Find Duplicate Issues")
 
941
  with gr.Row():
942
  dup_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
943
  dup_issue_number = gr.Number(label="Main Issue #", precision=0)
 
 
 
 
 
 
944
  dup_find_button = gr.Button("Find Duplicates", variant="primary")
945
  dup_report_output = gr.Markdown(label="Duplicate Report")
946
 
947
  with gr.TabItem("4. Issue Prioritizer"):
948
  gr.Markdown("### 🥇 Prioritize Backlog")
 
949
  prio_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
 
 
 
 
 
950
  prio_run_button = gr.Button("Generate Priority List", variant="primary")
951
  prio_report_output = gr.Markdown(label="Prioritized Backlog")
952
 
953
+ with gr.TabItem("5. Action Runner (Reply & Close)"):
954
+ gr.Markdown("### ⚡ Action Runner")
955
+ gr.Markdown("This tool allows the Agent (or you) to take action: post comments and close resolved issues.")
956
+
957
  with gr.Row():
958
+ action_repo_url = gr.Textbox(label="Repository URL", value="https://github.com/gradio-app/gradio")
959
+ action_issue_number = gr.Number(label="Issue Number", precision=0)
960
+
961
+ action_comment = gr.Textbox(
962
+ label="Comment Body (Markdown supported)",
963
+ lines=5,
964
+ placeholder="Ex: This issue is resolved in PR #123. Closing now."
965
+ )
966
+
967
+ action_close_checkbox = gr.Checkbox(label="Close this issue?", value=False, info="Check this box to confirm the issue is resolved and close it on GitHub.")
968
+
969
+ action_button = gr.Button("Execute Action ���", variant="stop") # Variant stop (vermelho) para indicar cuidado
970
+ action_output = gr.Markdown(label="Execution Result")
971
+
972
+ with gr.TabItem("BONUS 1 - Theme Generator"):
973
+ gr.Markdown("### 🖌️ Create a Theme with Natural Language")
974
  with gr.Row():
975
  with gr.Column(scale=1, min_width=450):
 
976
  with gr.Group():
977
  theme_prompt_input = gr.Textbox(label="Describe your theme", placeholder="Ex: a dark theme with purple tones...", lines=4)
978
  theme_generate_button = gr.Button("Generate Theme ✨", variant="primary")
 
981
  with gr.TabItem("Code to Use"):
982
  python_code_output = gr.Code(label="Copy and paste", language="python")
983
  with gr.TabItem("Publish to Hub"):
 
984
  hub_theme_name = gr.Textbox(label="Theme Name", placeholder="my-amazing-theme")
985
  hub_hf_token = gr.Textbox(label="HF Token", type="password", placeholder="hf_...")
986
  upload_button = gr.Button("Upload 🚀")
987
  upload_status = gr.Markdown(visible=False)
988
+ chatbot_comp, msg_input_comp, add_msg_btn_comp = _create_example_app()
 
989
 
990
  with gr.TabItem("BONUS 2 - Sketch Image to Gradio UI"):
991
  gr.Markdown("### 🖼️ Create a UI from a Sketch")
992
+ gr.Markdown("*Note: This tool uses the Global API Key, but requires a Vision Model (selected below).*")
993
  with gr.Row():
994
+ # Specific dropdown for Vision models, updated by global provider change
995
+ sketch_model_dropdown = gr.Dropdown(
996
+ choices=AVAILABLE_SKETCH_MODELS_BY_PROVIDER["gemini"],
997
+ value=AVAILABLE_SKETCH_MODELS_BY_PROVIDER["gemini"][0],
998
+ label="Vision Model (Specific to Sketch Tool)",
999
  interactive=True,
1000
  )
1001
  with gr.Row():
 
1004
  text_desc_input = gr.Textbox(label="Additional Description (Optional)")
1005
  sketch_generate_button = gr.Button("1. Generate Code", variant="secondary")
1006
  with gr.Column(scale=2):
1007
+ with gr.Tabs():
1008
+ with gr.TabItem("🐍 Python Code"):
1009
+ sketch_code_output = gr.Code(label="Generated UI Code", language="python", lines=20)
1010
+ with gr.TabItem("👀 Live Preview"):
1011
+ sketch_preview_output = gr.HTML(label="Gradio-Lite Preview")
1012
 
1013
+ # --- EVENTS & LOGIC ---
 
 
1014
 
1015
+ # Unified Model Updater: Updates BOTH the Text Model (Global) and Vision Model (Sketch) dropdowns
1016
+ get_models_btn.click(
1017
+ fn=get_available_models,
1018
+ inputs=None,
1019
+ outputs=[models_info_output],
1020
+ api_name="get_available_models"
1021
+ )
1022
+ def update_all_model_choices(provider):
1023
+ text_models = AVAILABLE_ISSUE_MODELS_BY_PROVIDER.get(provider, [])
1024
+ vision_models = AVAILABLE_SKETCH_MODELS_BY_PROVIDER.get(provider, [])
1025
+ return (
1026
+ gr.update(choices=text_models, value=text_models[0] if text_models else None),
1027
+ gr.update(choices=vision_models, value=vision_models[0] if vision_models else None)
1028
+ )
1029
 
1030
+ global_provider.change(
1031
+ fn=update_all_model_choices,
1032
+ inputs=[global_provider],
1033
+ outputs=[global_model, sketch_model_dropdown], # Update both dropdowns
1034
  show_api=False
1035
  )
1036
+
1037
+ # Tool 1: Sync
1038
+ sync_button.click(
1039
+ fn=sync_repository,
1040
+ inputs=[sync_repo_url, global_github_token],
1041
+ outputs=[sync_status_output],
1042
+ api_name="sync_repository"
1043
+ )
1044
+
1045
+ # Tool 2: Analyzer
1046
  issue_analyze_button.click(
1047
  fn=analyze_github_issue,
1048
+ inputs=[
1049
+ issue_repo_url, issue_number_input,
1050
+ global_provider, global_model,
1051
+ global_github_token, global_api_key
1052
+ ],
1053
  outputs=[issue_report_output],
1054
  api_name="analyze_github_issue",
1055
  )
1056
+
1057
+ # Tool 3: Duplicate Finder
 
 
 
 
1058
  dup_find_button.click(
1059
  fn=find_duplicate_issues,
1060
+ inputs=[
1061
+ dup_repo_url, dup_issue_number,
1062
+ global_provider, global_model,
1063
+ global_github_token, global_api_key
1064
+ ],
1065
  outputs=[dup_report_output],
1066
  api_name="find_duplicate_issues",
1067
  )
1068
+
1069
+ # Tool 4: Prioritizer
 
 
 
 
 
1070
  prio_run_button.click(
1071
  fn=prioritize_open_issues,
1072
+ inputs=[
1073
+ prio_repo_url,
1074
+ global_provider, global_model,
1075
+ global_api_key
1076
+ ],
1077
  outputs=[prio_report_output],
1078
  api_name="prioritize_open_issues"
1079
  )
1080
 
1081
+ # Tool 5: Action Runner
1082
+ action_button.click(
1083
+ fn=reply_and_close_issue,
1084
+ inputs=[
1085
+ action_repo_url,
1086
+ action_issue_number,
1087
+ action_comment,
1088
+ action_close_checkbox,
1089
+ global_github_token
1090
+ ],
1091
+ outputs=[action_output],
1092
+ api_name="reply_and_close_issue"
1093
  )
1094
+
1095
+ # Bonus 1: Theme Gen
1096
  theme_generate_button.click(
1097
  fn=generate_theme,
1098
+ inputs=[
1099
+ theme_prompt_input,
1100
+ global_provider, global_model,
1101
+ global_api_key
1102
+ ],
1103
  outputs=[generated_theme_state, python_code_output, status_output, css_data_transport],
1104
  ).then(
1105
  fn=None,
 
1107
  outputs=None,
1108
  js="(css) => { const action = () => apply_gradio_theme(css); if (typeof apply_gradio_theme === 'function') { action(); } else { document.addEventListener('theme-helper-injected', action, { once: true }); } }",
1109
  )
1110
+ upload_button.click(fn=handle_upload, inputs=[generated_theme_state, hub_theme_name, hub_hf_token], outputs=[upload_status])
1111
 
1112
+ # Bonus 2: Sketch (Uses Global Provider & Key, but LOCAL Vision Model)
1113
+ generate_event = sketch_generate_button.click(
1114
+ fn=generate_ui_from_sketch,
1115
+ inputs=[
1116
+ sketch_input, text_desc_input,
1117
+ global_provider, sketch_model_dropdown,
1118
+ global_api_key
1119
+ ],
1120
+ outputs=[sketch_code_output],
1121
+ )
1122
+ generate_event.success(
1123
+ fn=_create_gradio_lite_html,
1124
+ inputs=[sketch_code_output],
1125
+ outputs=[sketch_preview_output],
1126
+ show_api=False
1127
  )
1128
+
1129
  def add_message_to_chat(history, message):
1130
  """
1131
  A simple function to add a user message to the chat history
 
1142
  time.sleep(0.5)
1143
  history.append({"role": "assistant", "content": "Thank you for your message!"})
1144
  return history
1145
+
1146
+ # Chat Example Logic
1147
+ add_msg_btn_comp.click(fn=add_message_to_chat, inputs=[chatbot_comp, msg_input_comp], outputs=[chatbot_comp], show_api=False).then(fn=lambda: "", outputs=[msg_input_comp], show_api=False)
1148
+
1149
+ # Init
1150
+ app.load(fn=initialize_database, inputs=None, outputs=None, show_api=False).then(fn=_inject_theme_helper_js, inputs=None, outputs=[html_injector], show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1151
 
1152
  # endregion
1153
  if __name__ == "__main__":
1154
+ _check_env()
 
1155
  app.allowed_paths = ["."]
1156
+ app.launch(mcp_server=True, server_port=7860)
 
 
 
config/clients/{gemini.py → gemini_cli.py} RENAMED
@@ -14,12 +14,13 @@ BASE_URL = os.getenv("GEMINI_BASE_URL", "https://generativelanguage.googleapis.c
14
  DEFAULT_TEXT_MODEL = "gemini-2.0-flash" # Default text model
15
  VISION_MODEL = "gemini-2.5-flash-lite" # Model with vision capability
16
 
17
-
18
- def _get_api_key() -> Optional[str]:
19
- api_key = os.getenv(API_KEY_ENV_VAR)
20
- if not api_key:
21
- print("WARNING: GOOGLE_API_KEY environment variable not set.")
22
- return api_key
 
23
 
24
 
25
  def _format_payload_for_gemini(messages: List[Dict], image: Optional[Image.Image] = None) -> Optional[Dict]:
@@ -86,13 +87,14 @@ def call_api(
86
  stream: bool = False,
87
  temperature: float = 0.7,
88
  max_tokens: int = 8192,
 
89
  ) -> Union[Iterator[str], str]:
90
  """
91
  Calls the Google Gemini REST API, supporting text and multimodal inputs, with streaming.
92
  """
93
- api_key = _get_api_key()
94
- if not api_key:
95
- error_msg = "Error: Google API key not configured."
96
  return iter([error_msg]) if stream else error_msg
97
 
98
  # Choose the model based on the presence of an image
@@ -113,12 +115,22 @@ def call_api(
113
  payload["generationConfig"] = {"temperature": temperature, "maxOutputTokens": max_tokens}
114
 
115
  stream_param = "streamGenerateContent" if stream else "generateContent"
116
- request_url = f"{BASE_URL}{model_id}:{stream_param}?key={api_key}"
117
  headers = {"Content-Type": "application/json"}
118
 
119
  try:
120
  response = requests.post(request_url, headers=headers, json=payload, stream=stream, timeout=180)
121
- response.raise_for_status()
 
 
 
 
 
 
 
 
 
 
122
 
123
  if stream:
124
  # Re-implementing the streaming logic
@@ -146,10 +158,7 @@ def call_api(
146
  print(f"Gemini's response format unexpected. Full response: {data}")
147
  return f"[BLOCKED OR EMPTY RESPONSE]\n{data}"
148
 
149
- except requests.exceptions.HTTPError as e:
150
- err_msg = f"API HTTP Error ({e.response.status_code}): {e.response.text[:500]}"
151
- print(err_msg)
152
- return f"Error: {err_msg}"
153
  except Exception as e:
154
- print("Unexpected error in call_gemini_api:")
155
- return f"Error: {e}"
 
14
  DEFAULT_TEXT_MODEL = "gemini-2.0-flash" # Default text model
15
  VISION_MODEL = "gemini-2.5-flash-lite" # Model with vision capability
16
 
17
+ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
18
+ """
19
+ Returns the provided key if valid, otherwise falls back to the environment variable.
20
+ """
21
+ if provided_key and provided_key.strip():
22
+ return provided_key.strip()
23
+ return os.getenv(API_KEY_ENV_VAR)
24
 
25
 
26
  def _format_payload_for_gemini(messages: List[Dict], image: Optional[Image.Image] = None) -> Optional[Dict]:
 
87
  stream: bool = False,
88
  temperature: float = 0.7,
89
  max_tokens: int = 8192,
90
+ api_key: Optional[str] = None,
91
  ) -> Union[Iterator[str], str]:
92
  """
93
  Calls the Google Gemini REST API, supporting text and multimodal inputs, with streaming.
94
  """
95
+ final_api_key = _get_api_key(api_key)
96
+ if not final_api_key:
97
+ error_msg = "Error: Authentication required. No API key provided and no server fallback found."
98
  return iter([error_msg]) if stream else error_msg
99
 
100
  # Choose the model based on the presence of an image
 
115
  payload["generationConfig"] = {"temperature": temperature, "maxOutputTokens": max_tokens}
116
 
117
  stream_param = "streamGenerateContent" if stream else "generateContent"
118
+ request_url = f"{BASE_URL}{model_id}:{stream_param}?key={final_api_key}"
119
  headers = {"Content-Type": "application/json"}
120
 
121
  try:
122
  response = requests.post(request_url, headers=headers, json=payload, stream=stream, timeout=180)
123
+ if response.status_code != 200:
124
+ try:
125
+ error_details = response.json()
126
+ error_msg = error_details.get('error', {}).get('message', response.text)
127
+ except Exception:
128
+ error_msg = response.text
129
+
130
+ if response.status_code in [400, 401, 403]:
131
+ raise ValueError(f"Gemini Auth Error: {error_msg}")
132
+ else:
133
+ raise RuntimeError(f"Gemini API Error ({response.status_code}): {error_msg}")
134
 
135
  if stream:
136
  # Re-implementing the streaming logic
 
158
  print(f"Gemini's response format unexpected. Full response: {data}")
159
  return f"[BLOCKED OR EMPTY RESPONSE]\n{data}"
160
 
161
+ except requests.exceptions.RequestException as e:
162
+ raise ConnectionError(f"Connection to Gemini failed: {str(e)}")
 
 
163
  except Exception as e:
164
+ raise  # re-raise unexpected errors unchanged
 
config/clients/nebius_cli.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import os
3
+ from io import BytesIO
4
+ from typing import Dict, List, Optional
5
+
6
+ from PIL import Image
7
+ from openai import APIConnectionError, AuthenticationError, OpenAI, RateLimitError
8
+
9
+ # Configuration
10
+ API_KEY_ENV_VAR = "NEBIUS_API_KEY"
11
+ BASE_URL = os.getenv("NEBIUS_BASE_URL", "https://api.studio.nebius.ai/v1/")
12
+ DEFAULT_TEXT_MODEL = "meta-llama/Meta-Llama-3.1-70B-Instruct"
13
+ DEFAULT_VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
14
+
15
+ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
16
+ """
17
+ Returns the provided key if valid, otherwise falls back to the environment variable.
18
+ """
19
+ if provided_key and provided_key.strip():
20
+ return provided_key.strip()
21
+ return os.getenv(API_KEY_ENV_VAR)
22
+
23
+ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs):
24
+ """
25
+ Calls the Nebius AI Studio API (OpenAI-compatible).
26
+ """
27
+ # 1. Resolve API Key
28
+ final_api_key = _get_api_key(api_key)
29
+
30
+ if not final_api_key:
31
+ return "Error: Authentication required. Please provide a Nebius API Key."
32
+
33
+ # 2. Initialize Client
34
+ try:
35
+ client = OpenAI(base_url=BASE_URL, api_key=final_api_key)
36
+ except Exception as e:
37
+ return f"Error: Failed to initialize Nebius client. {e}"
38
+
39
+ # 3. Prepare Messages
40
+ final_messages = []
41
+
42
+ if image:
43
+ print("Making a VISION call to Nebius.")
44
+ buffered = BytesIO()
45
+ image.save(buffered, format="PNG")
46
+ image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
47
+
48
+ user_text = "Analyze this image."
49
+ for msg in messages:
50
+ if msg["role"] == "user":
51
+ user_text = msg["content"]
52
+ break
53
+
54
+ content = [
55
+ {"type": "text", "text": user_text},
56
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}}
57
+ ]
58
+
59
+ for msg in messages:
60
+ if msg["role"] == "system":
61
+ final_messages.append(msg)
62
+
63
+ final_messages.append({"role": "user", "content": content})
64
+ final_model = model_name or DEFAULT_VISION_MODEL
65
+ else:
66
+ print("Making a TEXT call to Nebius.")
67
+ final_messages = messages
68
+ final_model = model_name or DEFAULT_TEXT_MODEL
69
+
70
+ # 4. Call API
71
+ try:
72
+ print(f"Calling Nebius API with model: {final_model}")
73
+ completion = client.chat.completions.create(
74
+ model=final_model,
75
+ messages=final_messages,
76
+ **kwargs
77
+ )
78
+ return completion.choices[0].message.content
79
+ except AuthenticationError as e:
80
+ raise ValueError(f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}")
81
+ except RateLimitError:
82
+ raise RuntimeError("Rate Limit Exceeded (429). Please try again later.")
83
+ except APIConnectionError:
84
+ raise ConnectionError("Failed to connect to API. Check your internet.")
85
+ except Exception as e:
86
+ raise RuntimeError(f"API Error: {str(e)}")
config/clients/openai_cli.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import os
3
+ from io import BytesIO
4
+ from typing import Dict, List, Optional
5
+
6
+ from PIL import Image
7
+ from openai import APIConnectionError, AuthenticationError, OpenAI, RateLimitError
8
+
9
+ # Configuration
10
+ API_KEY_ENV_VAR = "OPENAI_API_KEY"
11
+ DEFAULT_TEXT_MODEL = "gpt-4o-mini"
12
+ DEFAULT_VISION_MODEL = "gpt-4o"
13
+
14
+ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
15
+ """
16
+ Returns the provided key if valid, otherwise falls back to the environment variable.
17
+ """
18
+ if provided_key and provided_key.strip():
19
+ return provided_key.strip()
20
+ return os.getenv(API_KEY_ENV_VAR)
21
+
22
+ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs):
23
+ """
24
+ Calls the OpenAI API (GPT models).
25
+ """
26
+ # 1. Resolve API Key
27
+ final_api_key = _get_api_key(api_key)
28
+
29
+ if not final_api_key:
30
+ return "Error: Authentication required. Please provide an OpenAI API Key."
31
+
32
+ # 2. Initialize Client
33
+ try:
34
+ client = OpenAI(api_key=final_api_key)
35
+ except Exception as e:
36
+ return f"Error: Failed to initialize OpenAI client. {e}"
37
+
38
+ # 3. Prepare Messages & Payload
39
+ final_messages = []
40
+
41
+ # Handle Vision (Multimodal)
42
+ if image:
43
+ print("Making a VISION call to OpenAI.")
44
+ buffered = BytesIO()
45
+ image.save(buffered, format="PNG")
46
+ image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
47
+
48
+ # Extract user text from messages to combine with image
49
+ user_text = "Analyze this image."
50
+ for msg in messages:
51
+ if msg["role"] == "user":
52
+ user_text = msg["content"]
53
+ break
54
+
55
+ # Format strictly for OpenAI Vision
56
+ content = [
57
+ {"type": "text", "text": user_text},
58
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}}
59
+ ]
60
+
61
+ # Add system prompt if exists
62
+ for msg in messages:
63
+ if msg["role"] == "system":
64
+ final_messages.append(msg)
65
+
66
+ final_messages.append({"role": "user", "content": content})
67
+ final_model = model_name or DEFAULT_VISION_MODEL
68
+
69
+ # Handle Text-Only
70
+ else:
71
+ print("Making a TEXT call to OpenAI.")
72
+ final_messages = messages
73
+ final_model = model_name or DEFAULT_TEXT_MODEL
74
+
75
+ # 4. Call API
76
+ try:
77
+ print(f"Calling OpenAI API with model: {final_model}")
78
+ completion = client.chat.completions.create(
79
+ model=final_model,
80
+ messages=final_messages,
81
+ **kwargs
82
+ )
83
+ return completion.choices[0].message.content
84
+ except AuthenticationError as e:
85
+ raise ValueError(f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}")
86
+ except RateLimitError:
87
+ raise RuntimeError("Rate Limit Exceeded (429). Please try again later.")
88
+ except APIConnectionError:
89
+ raise ConnectionError("Failed to connect to API. Check your internet.")
90
+ except Exception as e:
91
+ raise RuntimeError(f"API Error: {str(e)}")
config/clients/{sambanova.py → sambanova_cli.py} RENAMED
@@ -4,6 +4,7 @@ from io import BytesIO
4
  from typing import Dict, List, Optional
5
 
6
  from PIL import Image
 
7
  from sambanova import SambaNova
8
 
9
 
@@ -13,23 +14,29 @@ BASE_URL = os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")
13
  DEFAULT_TEXT_MODEL = "Meta-Llama-3.1-8B-Instruct"
14
  DEFAULT_VISION_MODEL = "Llama-4-Maverick-17B-128E-Instruct"
15
 
16
- api_key = os.getenv(API_KEY_ENV_VAR)
17
- if not api_key:
18
- print("WARNING: SAMBANOVA_API_KEY environment variable not set.")
19
- client = None
20
- else:
21
- client = SambaNova(
22
- api_key=api_key,
23
- base_url=BASE_URL,
24
- )
25
-
26
 
27
- def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, **kwargs):
28
  """
29
  Calls the SambaNova API, handling text or vision requests.
30
  """
31
- if client is None:
32
- return "Error: SambaNova client not initialized. Check the API key."
 
 
 
 
 
 
 
 
 
33
 
34
  if image:
35
  print("Making a VISION call to SambaNova.")
@@ -54,7 +61,19 @@ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Opti
54
  final_model = model_name or DEFAULT_TEXT_MODEL
55
 
56
  # API Call
57
- print(f"Calling SambaNova API with model: {final_model}")
58
- completion = client.chat.completions.create(model=final_model, messages=final_messages, **kwargs)
59
-
60
- return completion.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  from typing import Dict, List, Optional
5
 
6
  from PIL import Image
7
+ from openai import APIConnectionError, AuthenticationError, RateLimitError
8
  from sambanova import SambaNova
9
 
10
 
 
14
  DEFAULT_TEXT_MODEL = "Meta-Llama-3.1-8B-Instruct"
15
  DEFAULT_VISION_MODEL = "Llama-4-Maverick-17B-128E-Instruct"
16
 
17
+ def _get_api_key(provided_key: Optional[str] = None) -> Optional[str]:
18
+ """
19
+ Returns the provided key if valid, otherwise falls back to the environment variable.
20
+ """
21
+ if provided_key and provided_key.strip():
22
+ return provided_key.strip()
23
+ return os.getenv(API_KEY_ENV_VAR)
 
 
 
24
 
25
+ def call_api(messages: List[Dict], model_name: Optional[str] = None, image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs):
26
  """
27
  Calls the SambaNova API, handling text or vision requests.
28
  """
29
+
30
+ final_api_key = _get_api_key(api_key)
31
+
32
+ if not final_api_key:
33
+ return "Error: Authentication required. Please provide a SambaNova API Key."
34
+
35
+ try:
36
+ # Initialize client with the resolved key
37
+ client = SambaNova(api_key=final_api_key, base_url=BASE_URL)
38
+ except Exception as e:
39
+ return f"Error: Failed to initialize SambaNova client. {e}"
40
 
41
  if image:
42
  print("Making a VISION call to SambaNova.")
 
61
  final_model = model_name or DEFAULT_TEXT_MODEL
62
 
63
  # API Call
64
+ try:
65
+ print(f"Calling SambaNova API with model: {final_model}")
66
+ completion = client.chat.completions.create(
67
+ model=final_model,
68
+ messages=final_messages,
69
+ **kwargs
70
+ )
71
+ return completion.choices[0].message.content
72
+ except AuthenticationError as e:
73
+ raise ValueError(f"Authentication Failed: {e.body.get('message', str(e)) if e.body else str(e)}")
74
+ except RateLimitError:
75
+ raise RuntimeError("Rate Limit Exceeded (429). Please try again later.")
76
+ except APIConnectionError:
77
+ raise ConnectionError("Failed to connect to API. Check your internet.")
78
+ except Exception as e:
79
+ raise RuntimeError(f"API Error: {str(e)}")
config/constants.py CHANGED
@@ -3,13 +3,29 @@ PROVIDER_LOGOS = {
3
  "gemini": "https://upload.wikimedia.org/wikipedia/commons/d/d9/Google_Gemini_logo_2025.svg",
4
  "sambanova": "https://sambanova.ai/hs-fs/hubfs/sn-new-gray-logo.png?width=220&height=92&name=sn-new-gray-logo.png",
5
  "modal": "https://modal.com/_app/immutable/assets/logotype.CAx-nu9G.svg",
 
 
6
  "default": "https://www.gradio.app/_app/immutable/assets/gradio.CHB5adID.svg",
7
  }
8
 
9
- AVAILABLE_SKETCH_MODELS_BY_PROVIDER = {"gemini": ["gemini-2.5-flash-lite"], "sambanova": ["Llama-4-Maverick-17B-128E-Instruct"]}
 
 
 
 
 
 
 
10
  AVAILABLE_ISSUE_MODELS_BY_PROVIDER = {
11
  "gemini": ["gemini-2.0-flash"],
12
- "sambanova": ["DeepSeek-R1", "DeepSeek-V3-0324", "DeepSeek-V3.1", "Meta-Llama-3.1-8B-Instruct", "Meta-Llama-3.3-70B-Instruct", "gpt-oss-120b"],
 
 
 
 
 
 
 
13
  }
14
 
15
  # Prompt template
@@ -38,7 +54,7 @@ ALLOWED_HUE_COLORS = [
38
  "stone",
39
  ]
40
 
41
- SYSTEM_PROMPT_THEME_GENERATOR = f"""
42
  You are a world-class UI/UX designer with a deep understanding of the Gradio theme system. Your task is to take a user's high-level concept and generate a COMPLETE, COHESIVE, and AESTHETICALLY PLEASING theme configuration in JSON.
43
 
44
  **Your Design Philosophy:**
@@ -63,90 +79,41 @@ You are a world-class UI/UX designer with a deep understanding of the Gradio the
63
  Analyze the user's high-level request. Follow all the rules. Respond ONLY with the raw JSON object.
64
  """
65
 
66
- SYSTEM_PROMPT_GEMINI_ = """
67
- You are a precise theme generation assistant for the Gradio Python library. Your only job is to translate a user's description into a valid JSON object.
68
-
69
- **Your Task:**
70
- Based on the user's prompt, generate a JSON object with two keys: `constructor_args` and `set_args`.
71
-
72
- **RULES FOR `constructor_args`:**
73
- - This section defines the BASE palettes and sizes.
74
- - `primary_hue`, `secondary_hue`, `neutral_hue`: Choose a single color name (e.g., "blue", "pink", "slate").
75
- - `spacing_size`, `radius_size`, `text_size`: Choose `sm`, `md`, or `lg`.
76
- - `font`, `font_mono`: MUST be a list of strings (e.g., `["Roboto", "sans-serif"]`).
77
-
78
- **RULES FOR `set_args` (VERY IMPORTANT):**
79
- - This section overrides specific component styles.
80
- - **FOR EVERY VISUAL CUSTOMIZATION, YOU MUST PROVIDE A PAIR OF VALUES: one for light mode and one for dark mode (`_dark` version).**
81
- - **Example:** If the user wants a white background, you MUST provide both:
82
- - `"body_background_fill": "white"`
83
- - `"body_background_fill_dark": "*neutral_950"` (or another suitable dark color)
84
- - **Example:** If the user wants a green "cancel" button, you MUST provide both:
85
- - `"button_cancel_background_fill": "green"`
86
- - `"button_cancel_background_fill_dark": "darkgreen"`
87
- - **Example:** If a value should be the same in both modes (like a specific border width), provide both:
88
- - `"button_border_width": "2px"`
89
- - `"button_border_width_dark": "2px"`
90
- - Use palette references like `*primary_500` whenever possible. Remember the valid range is 50-950.
91
- - If the user asks for a "gradient", use the CSS `linear-gradient()` syntax.
92
-
93
- **Final Instruction:** Do not explain. Do not use markdown. Respond ONLY with the raw JSON object. Analyze the user prompt carefully and apply the rules above to fill out `constructor_args` and `set_args`.
94
- """
95
-
96
- SYSTEM_PROMPT_LAYOUT = """
97
- You are a Python code generation assistant specializing in the Gradio library. Your task is to generate ONLY the Python code for a `gr.Blocks` layout based on a user's description.
98
-
99
- **CRITICAL RULES:**
100
- 1. **Code Only:** Your entire response MUST be a valid Python code block and NOTHING else. Do not include explanations, installation instructions, or markdown formatting.
101
- 2. **Layout Only:** Generate ONLY the layout structure (`gr.Blocks`, `gr.Row`, `gr.Column`, `gr.Tabs`, `gr.TabItem`, `gr.Accordion`, etc.).
102
- 3. **Placeholders, Not Logic:** Do NOT invent functions, logic, or event listeners (`.click()`, `.change()`). Instead, place placeholder components (`gr.Button()`, `gr.Textbox()`, etc.) inside the layout.
103
- 4. **Use Comments for Guidance:** Add `# TODO:` comments to guide the user on where to add their components and logic.
104
- 5. **Keep it Simple:** The goal is to provide a clean, empty boilerplate/scaffold for the user to fill in.
105
-
106
- **Example User Prompt:** "Create a layout with a row. On the left, a column for inputs, and on the right, a column with two tabs for outputs."
107
-
108
- **Example PERFECT Response (Your entire output should look like this):**
109
- ```python
110
- import gradio as gr
111
-
112
- with gr.Blocks() as demo:
113
- with gr.Row():
114
- with gr.Column(scale=1):
115
- gr.Markdown("### Inputs")
116
- # TODO: Add your input components here
117
- input_slider = gr.Slider(label="Input Value")
118
- submit_button = gr.Button("Submit")
119
-
120
- with gr.Column(scale=2):
121
- with gr.Tabs():
122
- with gr.TabItem("Image Output"):
123
- # TODO: Add your image output component here
124
- output_image = gr.Image()
125
- with gr.TabItem("Text Output"):
126
- # TODO: Add your text output component here
127
- output_text = gr.Textbox(label="Logs")
128
-
129
- # TODO: Define your functions and connect event listeners here
130
- # submit_button.click(...)
131
-
132
- demo.launch()
133
- Now, generate the layout code for the user's request.
134
- """
135
-
136
- SYSTEM_PROMPT_SKETCH = (
137
  """
138
- You are an expert Gradio UI developer. Your task is to analyze a sketch and generate simple, clean, single-line Python code for a `gradio.Blocks` UI.
139
 
140
  **CRITICAL RULES:**
141
  1. **CODE ONLY:** Respond with a valid Python code block and nothing else.
142
  2. **NO EXTERNAL LIBRARIES:** Only use the `gradio` library.
143
  3. **NO FUNCTIONS OR CLASSES:** Start directly with `import gradio as gr` then `with gr.Blocks() as demo:`.
144
  4. **NO `demo.launch()`:** Do NOT include the `demo.launch()` call.
145
- 5. **NO MULTI-LINE STRINGS:** This is very important. Do NOT use triple quotes (`"""
 
 
 
 
146
  "..."
147
- """` or `'''...'''`). Use single-line strings (`"..."`) and `\n` for newlines if necessary. For `gr.Markdown`, combine content into a single line. Example: `gr.Markdown("# Title\n- Point 1\n- Point 2")`.
148
- 6. **MODERN COMPONENTS:** NEVER use `gr.Box()`. Use `gr.Group()` instead.
149
- 7. **DATA INITIALIZATION:** For `gr.Dataframe`, use a list of lists.
150
 
151
  Now, analyze the user's sketch and generate the code following ALL rules.
152
  """
 
3
  "gemini": "https://upload.wikimedia.org/wikipedia/commons/d/d9/Google_Gemini_logo_2025.svg",
4
  "sambanova": "https://sambanova.ai/hs-fs/hubfs/sn-new-gray-logo.png?width=220&height=92&name=sn-new-gray-logo.png",
5
  "modal": "https://modal.com/_app/immutable/assets/logotype.CAx-nu9G.svg",
6
+ "openai": "https://upload.wikimedia.org/wikipedia/commons/4/4d/OpenAI_Logo.svg",
7
+ "nebius": "https://mintcdn.com/nebius-723e8b65/jsgY7B_gdaTjMC6y/logo/Main-logo-TF-Light.svg?fit=max&auto=format&n=jsgY7B_gdaTjMC6y&q=85&s=48ceb3cd949e5160c884634bbaf1af59",
8
  "default": "https://www.gradio.app/_app/immutable/assets/gradio.CHB5adID.svg",
9
  }
10
 
11
+ AVAILABLE_SKETCH_MODELS_BY_PROVIDER = {
12
+ "gemini": ["gemini-2.5-flash", "gemini-2.5-flash-lite"],
13
+ "sambanova": ["Llama-4-Maverick-17B-128E-Instruct"],
14
+ "openai": ["gpt-4o-mini", "gpt-4o"],
15
+ "nebius": [
16
+ "Qwen/Qwen2.5-VL-72B-Instruct",
17
+ ]
18
+ }
19
  AVAILABLE_ISSUE_MODELS_BY_PROVIDER = {
20
  "gemini": ["gemini-2.0-flash"],
21
+ "sambanova": ["DeepSeek-R1", "DeepSeek-V3-0324", "DeepSeek-V3.1", "Meta-Llama-3.1-8B-Instruct", "Meta-Llama-3.3-70B-Instruct", "gpt-oss-120b"],
22
+ "openai": ["gpt-4o-mini", "gpt-4o"],
23
+ "nebius": [
24
+ "deepseek-ai/DeepSeek-R1-0528",
25
+ "meta-llama/Llama-3.3-70B-Instruct",
26
+ "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
27
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-fast"
28
+ ]
29
  }
30
 
31
  # Prompt template
 
54
  "stone",
55
  ]
56
 
57
+ SYSTEM_PROMPT_THEME_GENERATOR_EN = f"""
58
  You are a world-class UI/UX designer with a deep understanding of the Gradio theme system. Your task is to take a user's high-level concept and generate a COMPLETE, COHESIVE, and AESTHETICALLY PLEASING theme configuration in JSON.
59
 
60
  **Your Design Philosophy:**
 
79
  Analyze the user's high-level request. Follow all the rules. Respond ONLY with the raw JSON object.
80
  """
81
 
82
+ # SYSTEM_PROMPT_SKETCH_EN = (
83
+ # """
84
+ # You are an expert Gradio UI developer. Your task is to analyze a sketch and generate simple, clean, single-line Python code for a `gradio.Blocks` UI.
85
+
86
+ # **CRITICAL RULES:**
87
+ # 1. **CODE ONLY:** Respond with a valid Python code block and nothing else.
88
+ # 2. **NO EXTERNAL LIBRARIES:** Only use the `gradio` library.
89
+ # 3. **NO FUNCTIONS OR CLASSES:** Start directly with `import gradio as gr` then `with gr.Blocks() as demo:`.
90
+ # 4. **NO MULTI-LINE STRINGS:** This is very important. Do NOT use triple quotes (`"""
91
+ # "..."
92
+ # """` or `'''...'''`). Use single-line strings (`"..."`) and `\n` for newlines if necessary. For `gr.Markdown`, combine content into a single line. Example: `gr.Markdown("# Title\n- Point 1\n- Point 2")`.
93
+ # 5. **MODERN COMPONENTS:** NEVER use `gr.Box()`. Use `gr.Group()` instead.
94
+ # 6. **DATA INITIALIZATION:** For `gr.Dataframe`, use a list of lists.
95
+
96
+ # Now, analyze the user's sketch and generate the code following ALL rules.
97
+ # """
98
+ # )
99
+ SYSTEM_PROMPT_SKETCH_EN = (
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  """
101
+ You are an expert Gradio UI developer. Your task is to analyze a sketch and generate valid, well-formatted Python code for a `gradio.Blocks` UI.
102
 
103
  **CRITICAL RULES:**
104
  1. **CODE ONLY:** Respond with a valid Python code block and nothing else.
105
  2. **NO EXTERNAL LIBRARIES:** Only use the `gradio` library.
106
  3. **NO FUNCTIONS OR CLASSES:** Start directly with `import gradio as gr` then `with gr.Blocks() as demo:`.
107
  4. **NO `demo.launch()`:** Do NOT include the `demo.launch()` call.
108
+ 5. **FORMATTING:**
109
+ - Use standard Python indentation (4 spaces).
110
+ - **DO NOT** put multiple statements on one line using semicolons (`;`).
111
+ - Each component must be on its own line.
112
+ 6. **STRINGS:** Do NOT use triple quotes (`"""
113
  "..."
114
+ """` or `'''...'''`). Use single-line strings (`"..."`) and `\n` for newlines if necessary.
115
+ 7. **MODERN COMPONENTS:** NEVER use `gr.Box()`. Use `gr.Group()` instead. NEVER use `gr.Date()`. Use `gr.Textbox()` instead.
116
+ 8. **DATA INITIALIZATION:** For `gr.Dataframe`, use a list of lists.
117
 
118
  Now, analyze the user's sketch and generate the code following ALL rules.
119
  """
config/model_factory.py CHANGED
@@ -5,15 +5,13 @@ It abstracts the specific client implementations and provides a unified interfac
5
  """
6
 
7
  from typing import Any, Callable, Dict, List, Optional
8
-
9
  from PIL import Image
10
 
11
-
12
  # Step 1: Import the client-specific API call functions
13
  # It is assumed that there is a 'clients' directory with modules such as gemini.py, sambanova.py, etc.
14
  # Each module should expose a `call_api` function.
15
  try:
16
- from .clients import gemini, sambanova
17
  except ImportError:
18
  print("Warning: Could not import all LLM clients. Please ensure clients/gemini.py, clients/sambanova.py, etc. exist.")
19
 
@@ -22,14 +20,16 @@ except ImportError:
22
  def call_api(*args, **kwargs):
23
  return "Error: Client module not found."
24
 
25
- gemini = sambanova = DummyClient()
26
 
27
 
28
  # Step 2: Create a registry of available providers
29
  # This dictionary maps a provider name (string) to the function that calls its API.
30
  PROVIDER_REGISTRY: Dict[str, Callable] = {
31
- "gemini": gemini.call_api,
32
- "sambanova": sambanova.call_api,
 
 
33
  # To add a new provider, just add a new entry here, e.g.:
34
  # "another_provider": another_provider.call_api,
35
  }
@@ -62,7 +62,7 @@ class LLMFactory:
62
  return PROVIDER_REGISTRY[provider_key]
63
 
64
  @staticmethod
65
- def call(llm_provider: str, llm_model: Optional[str] = None, messages: List[Dict] = [], image: Optional[Image.Image] = None, **kwargs: Any) -> str:
66
  """
67
  The main factory method. It routes the request to the correct provider client.
68
  This method is designed to be the single point of contact for all LLM calls.
@@ -72,6 +72,7 @@ class LLMFactory:
72
  llm_model (Optional[str]): The specific model name to use (e.g., "gemini-1.5-pro-latest").
73
  messages (List[Dict]): A list of message dictionaries, following the standard role/content format.
74
  image (Optional[Image.Image]): A PIL Image object for multimodal (vision) requests.
 
75
  **kwargs: Additional keyword arguments to pass to the provider's API call function
76
  (e.g., temperature, max_tokens).
77
 
@@ -79,27 +80,21 @@ class LLMFactory:
79
  str: The text response from the LLM.
80
  """
81
  print(f"LLM Factory: Routing call to provider '{llm_provider}' with model '{llm_model or 'default'}'.")
 
 
 
82
 
83
- try:
84
- # Get the correct API function from the registry
85
- api_call_function = LLMFactory.get_provider(llm_provider)
86
-
87
- # Prepare the arguments to be passed to the client function.
88
- # Each client's `call_api` function is expected to handle these arguments.
89
- api_kwargs = {"messages": messages, "model_name": llm_model, "image": image, **kwargs}
90
-
91
- # Execute the API call
92
- result = api_call_function(**api_kwargs)
93
 
94
- if not isinstance(result, str):
95
- print(f"Warning: LLM provider '{llm_provider}' returned a non-string result of type {type(result)}.")
96
- return str(result)
97
 
98
- return result
 
 
99
 
100
- except Exception:
101
- import traceback
102
 
103
- traceback.print_exc()
104
- # Return a standardized error string
105
- return f"Error: Failed to get a response from the '{llm_provider}' provider."
 
5
  """
6
 
7
  from typing import Any, Callable, Dict, List, Optional
 
8
  from PIL import Image
9
 
 
10
  # Step 1: Import the client-specific API call functions
11
  # It is assumed that there is a 'clients' directory with modules such as gemini.py, sambanova.py, etc.
12
  # Each module should expose a `call_api` function.
13
  try:
14
+ from .clients import gemini_cli, sambanova_cli, openai_cli, nebius_cli
15
  except ImportError:
16
  print("Warning: Could not import all LLM clients. Please ensure clients/gemini.py, clients/sambanova.py, etc. exist.")
17
 
 
20
  def call_api(*args, **kwargs):
21
  return "Error: Client module not found."
22
 
23
+ gemini_cli = sambanova_cli = openai_cli = nebius_cli = DummyClient()
24
 
25
 
26
  # Step 2: Create a registry of available providers
27
  # This dictionary maps a provider name (string) to the function that calls its API.
28
  PROVIDER_REGISTRY: Dict[str, Callable] = {
29
+ "gemini": gemini_cli.call_api,
30
+ "sambanova": sambanova_cli.call_api,
31
+ "openai": openai_cli.call_api,
32
+ "nebius": nebius_cli.call_api,
33
  # To add a new provider, just add a new entry here, e.g.:
34
  # "another_provider": another_provider.call_api,
35
  }
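As the comment above suggests, adding a backend means shipping a client module with the same `call_api` keyword contract and registering it here; a hedged sketch with hypothetical names:

```python
# config/clients/another_provider_cli.py (hypothetical)
# def call_api(messages, model_name=None, image=None, api_key=None, **kwargs) -> str:
#     ...call the provider and return the response text...
#
# config/model_factory.py
# PROVIDER_REGISTRY["another_provider"] = another_provider_cli.call_api
```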
 
62
  return PROVIDER_REGISTRY[provider_key]
63
 
64
  @staticmethod
65
+ def call(llm_provider: str, llm_model: Optional[str] = None, messages: List[Dict] = [], image: Optional[Image.Image] = None, api_key: Optional[str] = None, **kwargs: Any) -> str:
66
  """
67
  The main factory method. It routes the request to the correct provider client.
68
  This method is designed to be the single point of contact for all LLM calls.
 
72
  llm_model (Optional[str]): The specific model name to use (e.g., "gemini-1.5-pro-latest").
73
  messages (List[Dict]): A list of message dictionaries, following the standard role/content format.
74
  image (Optional[Image.Image]): A PIL Image object for multimodal (vision) requests.
75
+ api_key (Optional[str]): The API key for the provider. If not provided, it will try to use environment variables.
76
  **kwargs: Additional keyword arguments to pass to the provider's API call function
77
  (e.g., temperature, max_tokens).
78
 
 
80
  str: The text response from the LLM.
81
  """
82
  print(f"LLM Factory: Routing call to provider '{llm_provider}' with model '{llm_model or 'default'}'.")
83
+
84
+ # Get the correct API function from the registry
85
+ api_call_function = LLMFactory.get_provider(llm_provider)
86
 
87
+ # Prepare the arguments to be passed to the client function.
88
+ # Each client's `call_api` function is expected to handle these arguments.
89
+ api_kwargs = {"messages": messages, "model_name": llm_model, "image": image, "api_key": api_key, **kwargs}
 
 
 
 
 
 
 
90
 
91
+ # Execute the API call
92
+ result = api_call_function(**api_kwargs)
 
93
 
94
+ if not isinstance(result, str):
95
+ print(f"Warning: LLM provider '{llm_provider}' returned a non-string result of type {type(result)}.")
96
+ return str(result)
97
 
98
+ return result
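A hedged calling sketch for the updated signature; the key and message below are placeholders, and omitting `api_key` falls back to the provider's environment variable:

```python
# Hedged sketch: route a text-only request through the factory with a per-user key.
reply = LLMFactory.call(
    llm_provider="openai",
    llm_model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Summarize this issue in one sentence."}],
    api_key="sk-...",  # placeholder; leave as None to use OPENAI_API_KEY from the environment
    temperature=0.2,
)
print(reply)
```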
 
99
 
100
+
 
 
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
  gradio
2
  gradio_htmlinjector
3
- dotenv
4
  requests
5
  sambanova
6
  markdown
7
- psycopg2-binary
 
 
 
1
  gradio
2
  gradio_htmlinjector
3
+ python-dotenv
4
  requests
5
  sambanova
6
  markdown
7
+ psycopg2-binary
8
+ langchain-google-genai
9
+ openai