Update server.py
server.py CHANGED
@@ -3,33 +3,28 @@ from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-import subprocess
-import tempfile
 
 app = FastAPI()
 
-# ✅ Fix: Use `/tmp` as cache directory (
+# ✅ Fix: Use `/tmp` as the cache directory (Hugging Face Spaces allows writing here)
 os.environ["TRANSFORMERS_CACHE"] = "/tmp"
 os.environ["HF_HOME"] = "/tmp"
 
-# ✅ Ensure
+# ✅ Ensure the `/tmp` directory exists
 if not os.path.exists("/tmp"):
     os.makedirs("/tmp")
 
-# ✅ Load
+# ✅ Load DeepSeek-Coder-V2-Base Model with correct cache directory
 model_name = "deepseek-ai/DeepSeek-Coder-V2-Base"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir="/tmp")
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", cache_dir="/tmp")
 
 class CodeRequest(BaseModel):
     user_story: str
 
-class TestRequest(BaseModel):
-    code: str
-
 @app.post("/generate-code")
 def generate_code(request: CodeRequest):
-    """Generates AI-powered
+    """Generates structured AI-powered code based on user story"""
     prompt = f"Generate structured code for: {request.user_story}"
 
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
@@ -38,37 +33,6 @@ def generate_code(request: CodeRequest):
 
     return {"generated_code": generated_code}
 
-@app.post("/test-code")
-def test_code(request: TestRequest):
-    """Runs automated testing on AI-generated code"""
-    try:
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".py", dir="/tmp") as temp_file:
-            temp_file.write(request.code.encode())
-            temp_file.close()
-
-        result = subprocess.run(["pytest", temp_file.name], capture_output=True, text=True)
-        os.unlink(temp_file.name)
-
-        if result.returncode == 0:
-            return {"test_status": "All tests passed!"}
-        else:
-            return {"test_status": "Test failed!", "details": result.stderr}
-
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.get("/execute-code")
-def execute_code():
-    """Executes AI-generated code and returns output"""
-    sample_code = "print('Hello from AI-generated code!')"
-
-    try:
-        result = subprocess.run(["python3", "-c", sample_code], capture_output=True, text=True)
-        return {"status": "Execution successful!", "output": result.stdout}
-
-    except Exception as e:
-        return {"status": "Execution failed!", "error": str(e)}
-
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=7860)
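For reference, once the updated Space starts, the remaining /generate-code endpoint can be exercised with a small client like the sketch below. The base URL is an assumption (use the Space's public URL, or http://localhost:7860 when running server.py directly); the endpoint path, the user_story field, and the generated_code response key come from the code above.

import requests

# Assumed base URL; replace with the actual Space URL if different.
BASE_URL = "http://localhost:7860"

# The request body matches the CodeRequest model (a single user_story string).
payload = {"user_story": "As a user, I want to reset my password via email."}

response = requests.post(f"{BASE_URL}/generate-code", json=payload, timeout=300)
response.raise_for_status()

# The endpoint returns JSON with a "generated_code" field.
print(response.json()["generated_code"])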