
Serve

Main serve script.

main(cfg)

Main entry point for serving.

Parameters:

    cfg (DictConfig): DictConfig configuration composed by Hydra. Required.

Returns:

    None.

Source code in src/serve.py
@hydra.main(version_base="1.3", config_path="../configs", config_name="serve.yaml")
def main(cfg: DictConfig) -> None:
    """Main entry point for serving.

    Args:
        cfg: DictConfig configuration composed by Hydra.

    Returns:
        None.
    """
    serve(cfg)
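
Because main is only a thin Hydra wrapper around serve, the same task can also be driven programmatically, for example from a test. The sketch below uses Hydra's compose API and assumes the calling module lives one directory below the repository root (e.g. in tests/) so that ../configs resolves to the same configs/ folder as in the decorator; the port override is purely illustrative.

from hydra import compose, initialize

from src.serve import serve

# Compose the same configuration that @hydra.main builds from serve.yaml,
# applying overrides exactly as they would be passed on the command line.
with initialize(version_base="1.3", config_path="../configs"):
    cfg = compose(config_name="serve.yaml", overrides=["serve.port=8080"])
    serve(cfg)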

serve(cfg)

Serve the model specified in the configuration as a FastAPI API via LitServe.

Parameters:

    cfg (DictConfig): A DictConfig configuration composed by Hydra. Required.

Returns:

    None.
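
serve resolves the model class from cfg.model._target_, instantiates the API object from cfg.serve.api with that class, and hands it to a LitServe server (see the source below). A minimal sketch of what such an API class could look like follows; the class name comes from the comment in the source, while the checkpoint path, request format, and response format are assumptions for illustration only.

import litserve as ls
import torch


class MNISTServeAPI(ls.LitAPI):
    """Illustrative LitServe API of the kind targeted by cfg.serve.api."""

    def __init__(self, model_class, ckpt_path: str = "checkpoints/last.ckpt"):
        super().__init__()
        # model_class is injected by serve() via hydra.utils.instantiate(cfg.serve.api, model_class=...)
        self.model_class = model_class
        self.ckpt_path = ckpt_path  # hypothetical default, not defined by serve.py

    def setup(self, device):
        # Load the LightningModule once per worker and put it in eval mode on the serving device.
        self.model = self.model_class.load_from_checkpoint(self.ckpt_path, map_location=device)
        self.model.eval()

    def decode_request(self, request):
        # Assumed JSON body: {"input": [...]} holding a flat list of pixel values.
        return torch.tensor(request["input"], dtype=torch.float32)

    def predict(self, x):
        # Batching hooks (batch/unbatch) are omitted; they become relevant when
        # cfg.serve.max_batch_size is greater than 1.
        with torch.no_grad():
            return self.model(x.unsqueeze(0))

    def encode_response(self, output):
        return {"prediction": output.argmax(dim=-1).item()}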

Source code in src/serve.py
@task_wrapper
def serve(cfg: DictConfig) -> None:
    """Serve the specified model in the configuration as a FastAPI api.

    Args:
        cfg: A DictConfig configuration composed by Hydra.

    Returns:
        None.
    """
    # set seed for random number generators in pytorch, numpy and python.random
    if cfg.get("seed"):
        lightning.seed_everything(cfg.seed, workers=True)
    log.info(f"Getting model class <{cfg.model._target_}>")
    model_class = hydra.utils.get_class(cfg.model._target_)
    lit_server_api = hydra.utils.instantiate(cfg.serve.api, model_class=model_class)
    # Create the LitServe server with the MNISTServeAPI
    server = ls.LitServer(lit_server_api, accelerator=cfg.serve.accelerator, max_batch_size=cfg.serve.max_batch_size)
    log.info("Initialized LitServe server")
    # Run the server on port 8000
    log.info(f"Starting LitServe server on port {cfg.serve.port}")
    server.run(port=cfg.serve.port)
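
By default LitServe exposes the model on a POST /predict endpoint, so once the server is up a client call might look like the sketch below; the payload keys depend entirely on how the configured API's decode_request is written and are illustrative only.

import requests

# Assumes the server is running locally on the port from cfg.serve.port (8000 here)
# and that the configured API decodes a JSON body with an "input" field.
response = requests.post(
    "http://localhost:8000/predict",
    json={"input": [0.0] * 784},  # e.g. a flattened 28x28 MNIST image
    timeout=10,
)
print(response.json())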