micrograd/app.py


from typing import List

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

from micrograd.train import model  # Replace with your ML model library

# Create FastAPI app
app = FastAPI()

# Load your ML model (replace this with your model loading logic)
# model = engine.load_model("path/to/your/model")


# Define a request model
class Item(BaseModel):
    data: List[float]  # Change the type to match the input format of your model
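
# A request body that validates against Item would look like the following
# (assuming, purely for illustration, a model that takes three float inputs):
#
#   {"data": [1.0, -0.5, 2.0]}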


# Endpoint for ML inference
@app.post("/predict")
async def predict(item: Item):
    try:
        # Perform prediction (modify this part according to your model's API)
        nn = model()
        prediction = nn(item.data)
        return {"prediction": prediction.data}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# Run the API with Uvicorn
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8008)
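
# Usage sketch: with the server running (e.g. started via this file's
# __main__ block), the /predict endpoint can be exercised from the command
# line. The payload below assumes, purely for illustration, a model that
# takes three float inputs; adjust it to your model's input size.
#
#   curl -X POST http://localhost:8008/predict \
#        -H "Content-Type: application/json" \
#        -d '{"data": [1.0, -0.5, 2.0]}'
#
# Or from Python with the `requests` package:
#
#   import requests
#   resp = requests.post("http://localhost:8008/predict",
#                        json={"data": [1.0, -0.5, 2.0]})
#   print(resp.json())  # -> {"prediction": ...}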