modal-examples/01_getting_started/inference_endpoint.py

# ---
# cmd: ["modal", "serve", "01_getting_started/inference_endpoint.py"]
# ---
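
# This example deploys a small chat model (Qwen3-1.7B-FP8, via Hugging Face
# transformers) as a GPU-backed web endpoint on Modal.
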
from pathlib import Path

import modal

app = modal.App("example-inference-endpoint")
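
# The container image: Debian slim plus transformers (with torch) and FastAPI,
# which the web endpoint decorator requires.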
image = (
    modal.Image.debian_slim()
    .uv_pip_install("transformers[torch]")
    .uv_pip_install("fastapi")
)
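
# Run on an H100 GPU and expose the function as a FastAPI endpoint; docs=True
# also serves interactive API docs.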
@app.function(gpu="h100", image=image)
@modal.fastapi_endpoint(docs=True)
def chat(prompt: str | None = None) -> list[dict]:
    from transformers import pipeline
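
    # With no prompt supplied, ask the model to explain this file's own source code.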
    if prompt is None:
        prompt = f"/no_think Read this code.\n\n{Path(__file__).read_text()}\nIn one paragraph, what does the code do?"
    print(prompt)
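
    # Wrap the prompt in a single-turn chat and run generation on the GPU.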
    context = [{"role": "user", "content": prompt}]
    chatbot = pipeline(
        model="Qwen/Qwen3-1.7B-FP8", device_map="cuda", max_new_tokens=1024
    )
    result = chatbot(context)
    print(result[0]["generated_text"][-1]["content"])

    return result
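
# To try it out, run `modal serve 01_getting_started/inference_endpoint.py` and
# GET the URL that Modal prints. A minimal client sketch (the hostname below is
# a hypothetical placeholder; use the one from your terminal):
#
#     import requests
#
#     url = "https://your-workspace--example-inference-endpoint-chat-dev.modal.run"
#     print(requests.get(url, params={"prompt": "Hello!"}).json())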