diff --git a/docs/docs/install/environment-variables.md b/docs/docs/install/environment-variables.md
index 1f34b5c6d0..b4cb905a0c 100644
--- a/docs/docs/install/environment-variables.md
+++ b/docs/docs/install/environment-variables.md
@@ -148,24 +148,26 @@ Redis (Sentinel) URL example JSON before encoding:
 
 ## Machine Learning
 
-| Variable | Description | Default | Containers |
-| :--- | :--- | :---: | :--- |
-| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
-| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
-| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
-| `MACHINE_LEARNING_REQUEST_THREADS`\*1 | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
-| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
-| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
-| `MACHINE_LEARNING_WORKERS`\*2 | Number of worker processes to spawn | `1` | machine learning |
-| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`\*3 | HTTP Keep-alive time in seconds | `2` | machine learning |
-| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
-| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
-| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
-| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
-| `MACHINE_LEARNING_DEVICE_IDS`\*4 | Device IDs to use in multi-GPU environments | `0` | machine learning |
-| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
+| Variable | Description | Default | Containers |
+| :--- | :--- | :---: | :--- |
+| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
+| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
+| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
+| `MACHINE_LEARNING_REQUEST_THREADS`\*1 | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
+| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
+| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
+| `MACHINE_LEARNING_WORKERS`\*2 | Number of worker processes to spawn | `1` | machine learning |
+| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`\*3 | HTTP Keep-alive time in seconds | `2` | machine learning |
+| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Name of the textual CLIP model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Name of the visual CLIP model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Name of the recognition portion of the facial recognition model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Name of the detection portion of the facial recognition model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
+| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
+| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
+| `MACHINE_LEARNING_DEVICE_IDS`\*4 | Device IDs to use in multi-GPU environments | `0` | machine learning |
+| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
 
 \*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
 
diff --git a/machine-learning/app/config.py b/machine-learning/app/config.py
index 92799ac692..fa7dc61d47 100644
--- a/machine-learning/app/config.py
+++ b/machine-learning/app/config.py
@@ -14,9 +14,41 @@ from uvicorn import Server
 from uvicorn.workers import UvicornWorker
 
 
+class ClipSettings(BaseModel):
+    textual: str | None = None
+    visual: str | None = None
+
+
+class FacialRecognitionSettings(BaseModel):
+    recognition: str | None = None
+    detection: str | None = None
+
+
 class PreloadModelData(BaseModel):
-    clip: str | None = None
-    facial_recognition: str | None = None
+    clip: ClipSettings = ClipSettings()
+    facial_recognition: FacialRecognitionSettings = FacialRecognitionSettings()
+
+    clip_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__CLIP", None)
+    facial_recognition_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION", None)
+
+    def update_from_fallbacks(self) -> None:
+        if self.clip_model_fallback:
+            self.clip.textual = self.clip_model_fallback
+            self.clip.visual = self.clip_model_fallback
+            log.warning(
+                "Deprecated env variable: MACHINE_LEARNING_PRELOAD__CLIP. "
+                "Use MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL and "
+                "MACHINE_LEARNING_PRELOAD__CLIP__VISUAL instead."
+            )
+
+        if self.facial_recognition_model_fallback:
+            self.facial_recognition.recognition = self.facial_recognition_model_fallback
+            self.facial_recognition.detection = self.facial_recognition_model_fallback
+            log.warning(
+                "Deprecated environment variable: MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION. "
+                "Use MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION and "
+                "MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION instead."
+            )
 
 
 class MaxBatchSize(BaseModel):
diff --git a/machine-learning/app/main.py b/machine-learning/app/main.py
index 684001b875..ee8881b6c6 100644
--- a/machine-learning/app/main.py
+++ b/machine-learning/app/main.py
@@ -76,18 +76,29 @@ async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:
 
 async def preload_models(preload: PreloadModelData) -> None:
     log.info(f"Preloading models: {preload}")
-    if preload.clip is not None:
-        model = await model_cache.get(preload.clip, ModelType.TEXTUAL, ModelTask.SEARCH)
+
+    if preload.clip.textual is not None:
+        model = await model_cache.get(preload.clip.textual, ModelType.TEXTUAL, ModelTask.SEARCH)
         await load(model)
 
-        model = await model_cache.get(preload.clip, ModelType.VISUAL, ModelTask.SEARCH)
+    if preload.clip.visual is not None:
+        model = await model_cache.get(preload.clip.visual, ModelType.VISUAL, ModelTask.SEARCH)
         await load(model)
 
-    if preload.facial_recognition is not None:
-        model = await model_cache.get(preload.facial_recognition, ModelType.DETECTION, ModelTask.FACIAL_RECOGNITION)
+    if preload.facial_recognition.detection is not None:
+        model = await model_cache.get(
+            preload.facial_recognition.detection,
+            ModelType.DETECTION,
+            ModelTask.FACIAL_RECOGNITION,
+        )
         await load(model)
 
-        model = await model_cache.get(preload.facial_recognition, ModelType.RECOGNITION, ModelTask.FACIAL_RECOGNITION)
+    if preload.facial_recognition.recognition is not None:
+        model = await model_cache.get(
+            preload.facial_recognition.recognition,
+            ModelType.RECOGNITION,
+            ModelTask.FACIAL_RECOGNITION,
+        )
         await load(model)
 
 
diff --git a/machine-learning/app/test_main.py b/machine-learning/app/test_main.py
index e5cb63997c..5da3baded7 100644
--- a/machine-learning/app/test_main.py
+++ b/machine-learning/app/test_main.py
@@ -700,11 +700,13 @@ class TestCache:
             await model_cache.get("test_model_name", ModelType.TEXTUAL, ModelTask.SEARCH)
 
     async def test_preloads_clip_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__CLIP"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"
 
         settings = Settings()
         assert settings.preload is not None
-        assert settings.preload.clip == "ViT-B-32__openai"
+        assert settings.preload.clip.textual == "ViT-B-32__openai"
+        assert settings.preload.clip.visual == "ViT-B-32__openai"
 
         model_cache = ModelCache()
         monkeypatch.setattr("app.main.model_cache", model_cache)
@@ -721,11 +723,13 @@ class TestCache:
     async def test_preloads_facial_recognition_models(
         self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock
     ) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s"
 
         settings = Settings()
         assert settings.preload is not None
-        assert settings.preload.facial_recognition == "buffalo_s"
+        assert settings.preload.facial_recognition.detection == "buffalo_s"
+        assert settings.preload.facial_recognition.recognition == "buffalo_s"
 
         model_cache = ModelCache()
         monkeypatch.setattr("app.main.model_cache", model_cache)
@@ -740,13 +744,17 @@ class TestCache:
         )
 
     async def test_preloads_all_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__CLIP"] = "ViT-B-32__openai"
-        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s"
 
         settings = Settings()
         assert settings.preload is not None
-        assert settings.preload.clip == "ViT-B-32__openai"
-        assert settings.preload.facial_recognition == "buffalo_s"
+        assert settings.preload.clip.visual == "ViT-B-32__openai"
+        assert settings.preload.clip.textual == "ViT-B-32__openai"
+        assert settings.preload.facial_recognition.recognition == "buffalo_s"
+        assert settings.preload.facial_recognition.detection == "buffalo_s"
 
         model_cache = ModelCache()
         monkeypatch.setattr("app.main.model_cache", model_cache)
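
For anyone trying the change locally, here is a minimal sketch (not part of the patch) of how the new nested preload variables resolve through `Settings`, mirroring the updated tests. It assumes the machine-learning app context (so `app.config.Settings` is importable) and that `PreloadModelData.update_from_fallbacks()` is invoked during startup for the deprecated single-name variables; that call site is outside this diff.

```python
# Minimal sketch mirroring the updated tests in test_main.py; assumes it runs in the
# machine-learning app context so that `app.config.Settings` is importable.
import os

from app.config import Settings

# New per-component preload variables introduced by this change:
os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"
os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s"
os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s"

# The double-underscore delimiter maps onto the nested ClipSettings and
# FacialRecognitionSettings models added in config.py.
settings = Settings()
assert settings.preload is not None
assert settings.preload.clip.textual == "ViT-B-32__openai"
assert settings.preload.clip.visual == "ViT-B-32__openai"
assert settings.preload.facial_recognition.detection == "buffalo_s"
assert settings.preload.facial_recognition.recognition == "buffalo_s"

# The deprecated single-name variables (MACHINE_LEARNING_PRELOAD__CLIP and
# MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION) are still read as fallbacks:
# update_from_fallbacks() copies them onto both sub-fields and logs a
# deprecation warning. Its call site is not shown in this diff.
```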