2024-06-22 12:31:53 +00:00
|
|
|
import uuid
|
2023-10-15 20:04:58 +00:00
|
|
|
import transformers
|
2024-06-23 18:23:15 +00:00
|
|
|
import asyncio
|
2024-06-19 13:06:23 +00:00
|
|
|
import torch
|
2023-10-16 10:45:15 +00:00
|
|
|
from fastapi import FastAPI
|
|
|
|
from pydantic import BaseModel
|
2024-06-20 09:26:16 +00:00
|
|
|
from contextlib import asynccontextmanager
|
|
|
|
from apscheduler.schedulers.background import BackgroundScheduler
|
2023-10-16 10:45:15 +00:00
|
|
|
|
2024-06-19 20:58:08 +00:00
|
|
|
# TODO: Store this in redis down the line.
# In-memory job state shared between the HTTP handlers and the background
# worker.  Process-local: everything here is lost on restart.

# job id -> messages payload, for jobs accepted but not yet processed.
items_pending = {}

# FIFO of pending job ids; the worker pops from the front.
queue = []

# job id -> pipeline output, for jobs whose inference has completed.
items_processed = {}
|
2023-10-16 10:45:15 +00:00
|
|
|
|
2024-06-23 18:23:15 +00:00
|
|
|
async def job():
    """Background worker loop: drain the in-memory queue forever.

    Pops job ids off ``queue`` (FIFO), runs the transformers pipeline on the
    corresponding ``items_pending`` messages, stores the result in
    ``items_processed`` and drops the pending entry.  Never returns; it is
    meant to run as a long-lived asyncio task for the lifetime of the app.
    """
    print("Processing queue...")
    while True:
        if not queue:
            # Nothing to do: back off instead of busy-spinning.
            print("Queue is empty. Sleeping for 5 seconds.")
            await asyncio.sleep(5)
            continue

        # Process the next queued item.
        random_id = queue.pop(0)
        print(f"Processing item {random_id}")
        messages = items_pending[random_id]
        print(f"Messages:")
        print(messages)
        # The transformers pipeline is blocking CPU/GPU work.  Calling it
        # directly would freeze the event loop (and every HTTP endpoint)
        # for the whole inference; run it in a worker thread instead.
        outputs = await asyncio.to_thread(pipe, messages)
        items_processed[random_id] = outputs
        del items_pending[random_id]
        print(f"Processed item {random_id}")
|
2024-06-20 09:26:16 +00:00
|
|
|
|
2024-06-23 18:23:15 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
2024-06-20 09:26:16 +00:00
|
|
|
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: start the queue worker on startup, stop it on shutdown.

    ``job()`` is an infinite loop, so awaiting it directly (as the previous
    version did) never reaches ``yield`` and the application never starts
    serving requests.  Schedule it as a background task instead, and cancel
    it cleanly when the application shuts down.
    """
    worker = asyncio.create_task(job())
    try:
        yield
    finally:
        # Shutdown: cancel the worker and wait for it to unwind.
        worker.cancel()
        try:
            await worker
        except asyncio.CancelledError:
            pass
|
|
|
|
|
2023-10-16 10:45:15 +00:00
|
|
|
# Declare a Pydantic model for the request body
class Prompt(BaseModel):
    # Chat messages to run through the text-generation pipeline.
    # NOTE(review): presumably a list of {"role": ..., "content": ...}
    # dicts as expected by the transformers chat pipeline — TODO confirm.
    messages: list
|
2023-10-16 10:45:15 +00:00
|
|
|
|
2024-06-19 20:58:08 +00:00
|
|
|
# Declare a Pydantic model for the request body
class PromptResult(BaseModel):
    # The job id returned by POST /prompt/ whose status is being queried.
    id: str
|
|
|
|
|
2024-06-18 20:41:29 +00:00
|
|
|
# Path to the locally stored Llama 3 8B Instruct weights.
model_path = "/app/Models/Meta-Llama-3-8B-Instruct"

# Build the text-generation pipeline once at import time; model loading is
# expensive, so a single shared pipeline serves all queued requests.
pipe = transformers.pipeline(
    "text-generation",
    model=model_path,
    # use gpu if available
    device="cuda" if torch.cuda.is_available() else "cpu",
)
|
2023-10-15 17:14:15 +00:00
|
|
|
|
2024-06-20 09:26:16 +00:00
|
|
|
# Wire in the lifespan handler so the queue worker runs alongside the app.
app = FastAPI(lifespan=lifespan)
|
2023-10-16 10:45:15 +00:00
|
|
|
|
2024-06-19 13:06:23 +00:00
|
|
|
@app.get("/")
async def root():
    """Health-check endpoint: report that the service is up."""
    response = {"status": "ok"}
    return response
|
2024-06-19 13:06:23 +00:00
|
|
|
|
2023-10-16 10:45:15 +00:00
|
|
|
@app.post("/prompt/")
async def create_item(prompt: Prompt):
    """Accept a prompt, enqueue it for background processing, return its id.

    By the time this handler runs, FastAPI/pydantic has already validated the
    request body against ``Prompt``, so ``prompt`` is always a model instance
    (model instances are truthy) — the old ``if not prompt`` guard was dead
    code and has been removed.
    """
    # Log prompt to console
    print(prompt)

    messages = prompt.messages

    # Generate a unique id for this job.
    random_id = str(uuid.uuid4())

    # Register the payload and enqueue the id for the background worker.
    items_pending[random_id] = messages
    queue.append(random_id)

    # Return response
    return {
        "id": random_id,
        "status": "queued"
    }
|
|
|
|
|
2024-06-22 12:31:53 +00:00
|
|
|
@app.get("/queue-status/")
async def queue_status():
    """Expose the raw in-memory job state for inspection and debugging."""
    snapshot = {
        "pending": items_pending,
        "processed": items_processed,
        "queue": queue,
    }
    return snapshot
|
2024-06-22 12:31:53 +00:00
|
|
|
|
2024-06-19 20:58:08 +00:00
|
|
|
@app.post("/prompt-result/")
async def prompt_status(prompt_status: PromptResult):
    """Look up the state of a previously submitted prompt by id.

    If inference has finished, return the output once (the stored result is
    consumed); otherwise report "pending" or "not found".  Pydantic has
    already validated the body, so ``prompt_status`` is always a truthy model
    instance — the old ``if not prompt_status`` guard was dead code and has
    been removed.
    """
    # Log prompt status to console
    print(prompt_status)

    # Atomic remove-and-fetch: one pop() replaces the old membership check,
    # read, and del — three separate lookups that could race if another
    # coroutine consumed the entry between them.  A local sentinel is used
    # because None could in principle be a legitimate stored output.
    _missing = object()
    outputs = items_processed.pop(prompt_status.id, _missing)
    if outputs is not _missing:
        return {
            "id": prompt_status.id,
            "status": "processed",
            "output": outputs
        }

    status = "pending" if prompt_status.id in items_pending else "not found"
    return {
        "id": prompt_status.id,
        "status": status
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|