"""FastAPI model-server API (api.py)."""
from typing import Dict, Optional

from fastapi import FastAPI, HTTPException

from model_server.session import Session
from model_server.workflow import infer_image_to_image

# Module-level singletons shared by every route handler below:
# the ASGI application and the process-wide model session.
app = FastAPI()
session = Session()

@app.on_event("startup")
def startup():
    """Application startup hook; currently performs no initialization."""

@app.get('/')
def read_root():
    """Health-check endpoint confirming the API is up and responding."""
    return dict(success=True)

@app.get('/models')
def list_active_models():
    """Report every model currently loaded in the session."""
    loaded = session.describe_loaded_models()
    return loaded

@app.put('/models/load/')
def load_model(model_id: str, params: Optional[Dict[str, str]] = None) -> dict:
    """Load a model into the session by its id.

    Args:
        model_id: identifier of the model to load.
        params: optional string key/value parameters forwarded to the loader.

    Returns:
        Description of all currently loaded models.

    Raises:
        HTTPException: 409 if a model with this id is already loaded.
    """
    # Guard against duplicate loads; membership test on the dict directly
    # (no need for .keys()).
    if model_id in session.models:
        raise HTTPException(
            status_code=409,
            detail=f'Model with id {model_id} has already been loaded'
        )
    session.load_model(model_id, params=params)
    return session.describe_loaded_models()

@app.put('/i2i/infer/{model_id}') # image file in, image file out
def infer_img(model_id: str, input_filename: str, channel: Optional[int] = None) -> dict:
    """Run image-to-image inference with a loaded model on an inbound file.

    Args:
        model_id: id of an already-loaded model.
        input_filename: name of a file in the session's inbound directory.
        channel: optional image channel passed through to the workflow.

    Returns:
        The workflow run record, which is also stored in the session history.

    Raises:
        HTTPException: 409 if the model is not loaded; 404 if the input
            file does not exist.
    """
    if model_id not in session.describe_loaded_models():
        raise HTTPException(
            status_code=409,
            detail=f'Model {model_id} has not been loaded'
        )
    inpath = session.inbound.path / input_filename
    if not inpath.exists():
        raise HTTPException(
            status_code=404,
            detail=f'Could not find file:\n{inpath}'
        )
    # Look up the model object once and reuse it (the original fetched it
    # twice and left the first lookup unused).
    model = session.models[model_id]['object']
    record = infer_image_to_image(
        inpath,
        model,
        session.outbound.path,
        channel=channel,
        # TODO: optional callback for status reporting
    )
    session.record_workflow_run(record)
    return record

# TODO: report out model inference status
@app.get('/i2i/status/{model_id}')
def status_model_inference(model_id: str) -> dict:
    """Placeholder for model inference status reporting (not implemented).

    Currently returns None (serialized as JSON null) for every model_id.
    """
    pass