Mirror of https://github.com/immich-app/immich.git (synced 2025-01-21 00:52:43 -05:00)
feat: Add additional env variables for Machine Learning (#15326)
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Update config.py
* Add additional variables to preload part ML models
* Add additional variables to preload part ML models
* Apply formatting
* minor update
* formatting
* root validator
* minor update
* minor update
* minor update
* change to support explicit models
* minor update
* minor change
* minor change
* minor change
* minor update
* add logs, resolve errors
* minor change
* add new environment variables
* minor revisions
* remove comments
parent 5d2e421800 · commit c5476a99b1
4 changed files with 87 additions and 34 deletions
@@ -148,24 +148,26 @@ Redis (Sentinel) URL example JSON before encoding:

## Machine Learning

-| Variable | Description | Default | Containers |
-| :-------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
-| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
-| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
-| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
-| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
-| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
-| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
-| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
-| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
-| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
-| `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
-| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
-| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
-| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
-| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
-| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |
+| Variable | Description | Default | Containers |
+| :---------------------------------------------------------- | :-------------------------------------------------------------------------------------------------- | :-----------------------------: | :--------------- |
+| `MACHINE_LEARNING_MODEL_TTL` | Inactivity time (s) before a model is unloaded (disabled if \<= 0) | `300` | machine learning |
+| `MACHINE_LEARNING_MODEL_TTL_POLL_S` | Interval (s) between checks for the model TTL (disabled if \<= 0) | `10` | machine learning |
+| `MACHINE_LEARNING_CACHE_FOLDER` | Directory where models are downloaded | `/cache` | machine learning |
+| `MACHINE_LEARNING_REQUEST_THREADS`<sup>\*1</sup> | Thread count of the request thread pool (disabled if \<= 0) | number of CPU cores | machine learning |
+| `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
+| `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
+| `MACHINE_LEARNING_WORKERS`<sup>\*2</sup> | Number of worker processes to spawn | `1` | machine learning |
+| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`<sup>\*3</sup> | HTTP Keep-alive time in seconds | `2` | machine learning |
+| `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO) | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL` | Name of the textual CLIP model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__CLIP__VISUAL` | Name of the visual CLIP model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION` | Name of the recognition portion of the facial recognition model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION` | Name of the detection portion of the facial recognition model to be preloaded and kept in cache | | machine learning |
+| `MACHINE_LEARNING_ANN` | Enable ARM-NN hardware acceleration if supported | `True` | machine learning |
+| `MACHINE_LEARNING_ANN_FP16_TURBO` | Execute operations in FP16 precision: increasing speed, reducing precision (applies only to ARM-NN) | `False` | machine learning |
+| `MACHINE_LEARNING_ANN_TUNING_LEVEL` | ARM-NN GPU tuning level (1: rapid, 2: normal, 3: exhaustive) | `2` | machine learning |
+| `MACHINE_LEARNING_DEVICE_IDS`<sup>\*4</sup> | Device IDs to use in multi-GPU environments | `0` | machine learning |
+| `MACHINE_LEARNING_MAX_BATCH_SIZE__FACIAL_RECOGNITION` | Set the maximum number of faces that will be processed at once by the facial recognition model | None (`1` if using OpenVINO) | machine learning |

\*1: It is recommended to begin with this parameter when changing the concurrency levels of the machine learning service and then tune the other ones.
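The double-underscore names above map onto the machine-learning settings object the way the new tests in this commit exercise them: each `__` segment selects a nested field. Below is a minimal, illustrative sketch of that mapping, assuming a pydantic-settings model configured with `env_prefix="MACHINE_LEARNING_"` and `env_nested_delimiter="__"`; the class names are stand-ins, not Immich's exact config classes.

```python
import os

from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class ClipPreload(BaseModel):
    # Stand-ins for the textual/visual halves of a CLIP model pair.
    textual: str | None = None
    visual: str | None = None


class Preload(BaseModel):
    clip: ClipPreload = ClipPreload()


class MLSettings(BaseSettings):
    # "__" splits an environment variable into nested fields, e.g.
    # MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL -> preload.clip.textual
    model_config = SettingsConfigDict(env_prefix="MACHINE_LEARNING_", env_nested_delimiter="__")

    preload: Preload = Preload()


os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"

settings = MLSettings()
print(settings.preload.clip.textual)  # ViT-B-32__openai
print(settings.preload.clip.visual)   # ViT-B-32__openai
```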
@@ -14,9 +14,41 @@ from uvicorn import Server
from uvicorn.workers import UvicornWorker


+class ClipSettings(BaseModel):
+    textual: str | None = None
+    visual: str | None = None
+
+
+class FacialRecognitionSettings(BaseModel):
+    recognition: str | None = None
+    detection: str | None = None
+
+
class PreloadModelData(BaseModel):
-    clip: str | None = None
-    facial_recognition: str | None = None
+    clip: ClipSettings = ClipSettings()
+    facial_recognition: FacialRecognitionSettings = FacialRecognitionSettings()
+
+    clip_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__CLIP", None)
+    facial_recognition_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION", None)
+
+    def update_from_fallbacks(self) -> None:
+        if self.clip_model_fallback:
+            self.clip.textual = self.clip_model_fallback
+            self.clip.visual = self.clip_model_fallback
+            log.warning(
+                "Deprecated env variable: MACHINE_LEARNING_PRELOAD__CLIP. "
+                "Use MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL and "
+                "MACHINE_LEARNING_PRELOAD__CLIP__VISUAL instead."
+            )
+
+        if self.facial_recognition_model_fallback:
+            self.facial_recognition.recognition = self.facial_recognition_model_fallback
+            self.facial_recognition.detection = self.facial_recognition_model_fallback
+            log.warning(
+                "Deprecated environment variable: MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION. "
+                "Use MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION and "
+                "MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION instead."
+            )


class MaxBatchSize(BaseModel):
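The fallback fields above keep the old single-model variables working: when one is set, `update_from_fallbacks` copies it into both nested fields and emits a deprecation warning. Here is a self-contained sketch of just the CLIP half of that behavior; the logger setup and warning text are illustrative, not Immich's exact wiring.

```python
import logging
import os

from pydantic import BaseModel

log = logging.getLogger("preload-example")  # stand-in for Immich's ML logger

# The deprecated variable must be set before the class body runs, because the
# fallback default is captured by os.getenv at class-definition (import) time.
os.environ["MACHINE_LEARNING_PRELOAD__CLIP"] = "ViT-B-32__openai"


class ClipSettings(BaseModel):
    textual: str | None = None
    visual: str | None = None


class PreloadModelData(BaseModel):
    clip: ClipSettings = ClipSettings()
    clip_model_fallback: str | None = os.getenv("MACHINE_LEARNING_PRELOAD__CLIP", None)

    def update_from_fallbacks(self) -> None:
        # The old single CLIP variable fills both nested slots, so existing
        # deployments keep preloading while a deprecation warning is logged.
        if self.clip_model_fallback:
            self.clip.textual = self.clip_model_fallback
            self.clip.visual = self.clip_model_fallback
            log.warning("MACHINE_LEARNING_PRELOAD__CLIP is deprecated; use the __TEXTUAL/__VISUAL variants.")


preload = PreloadModelData()
preload.update_from_fallbacks()
assert preload.clip.textual == preload.clip.visual == "ViT-B-32__openai"
```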
@@ -76,18 +76,29 @@ async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:

async def preload_models(preload: PreloadModelData) -> None:
    log.info(f"Preloading models: {preload}")
-    if preload.clip is not None:
-        model = await model_cache.get(preload.clip, ModelType.TEXTUAL, ModelTask.SEARCH)
+
+    if preload.clip.textual is not None:
+        model = await model_cache.get(preload.clip.textual, ModelType.TEXTUAL, ModelTask.SEARCH)
        await load(model)

-        model = await model_cache.get(preload.clip, ModelType.VISUAL, ModelTask.SEARCH)
+    if preload.clip.visual is not None:
+        model = await model_cache.get(preload.clip.visual, ModelType.VISUAL, ModelTask.SEARCH)
        await load(model)

-    if preload.facial_recognition is not None:
-        model = await model_cache.get(preload.facial_recognition, ModelType.DETECTION, ModelTask.FACIAL_RECOGNITION)
+    if preload.facial_recognition.detection is not None:
+        model = await model_cache.get(
+            preload.facial_recognition.detection,
+            ModelType.DETECTION,
+            ModelTask.FACIAL_RECOGNITION,
+        )
        await load(model)

-        model = await model_cache.get(preload.facial_recognition, ModelType.RECOGNITION, ModelTask.FACIAL_RECOGNITION)
+    if preload.facial_recognition.recognition is not None:
+        model = await model_cache.get(
+            preload.facial_recognition.recognition,
+            ModelType.RECOGNITION,
+            ModelTask.FACIAL_RECOGNITION,
+        )
        await load(model)
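With the preload settings split per model part, the startup warm-up can now target only the parts that are actually configured, as the per-field checks above show. The standalone sketch below illustrates that selective-preload pattern; `fake_load` and `preload_clip` are stand-ins, not Immich's `model_cache`/`load` helpers.

```python
import asyncio

from pydantic import BaseModel


class ClipSettings(BaseModel):
    textual: str | None = None
    visual: str | None = None


async def fake_load(name: str, kind: str) -> None:
    # Stand-in for fetching a model from the cache and loading it into memory.
    print(f"preloading {kind} model: {name}")


async def preload_clip(clip: ClipSettings) -> None:
    # Mirrors the per-field checks above: each half of the CLIP pair is optional.
    if clip.textual is not None:
        await fake_load(clip.textual, "textual")
    if clip.visual is not None:
        await fake_load(clip.visual, "visual")


# Only the textual encoder is warmed up, e.g. for text-only smart-search startup.
asyncio.run(preload_clip(ClipSettings(textual="ViT-B-32__openai")))
```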
@@ -700,11 +700,13 @@ class TestCache:
        await model_cache.get("test_model_name", ModelType.TEXTUAL, ModelTask.SEARCH)

    async def test_preloads_clip_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__CLIP"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"

        settings = Settings()
        assert settings.preload is not None
-        assert settings.preload.clip == "ViT-B-32__openai"
+        assert settings.preload.clip.textual == "ViT-B-32__openai"
+        assert settings.preload.clip.visual == "ViT-B-32__openai"

        model_cache = ModelCache()
        monkeypatch.setattr("app.main.model_cache", model_cache)
@@ -721,11 +723,13 @@ class TestCache:
    async def test_preloads_facial_recognition_models(
        self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock
    ) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s"

        settings = Settings()
        assert settings.preload is not None
-        assert settings.preload.facial_recognition == "buffalo_s"
+        assert settings.preload.facial_recognition.detection == "buffalo_s"
+        assert settings.preload.facial_recognition.recognition == "buffalo_s"

        model_cache = ModelCache()
        monkeypatch.setattr("app.main.model_cache", model_cache)
|
||||
|
@@ -740,13 +744,17 @@ class TestCache:
        )

    async def test_preloads_all_models(self, monkeypatch: MonkeyPatch, mock_get_model: mock.Mock) -> None:
-        os.environ["MACHINE_LEARNING_PRELOAD__CLIP"] = "ViT-B-32__openai"
-        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__TEXTUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__CLIP__VISUAL"] = "ViT-B-32__openai"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__RECOGNITION"] = "buffalo_s"
+        os.environ["MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION__DETECTION"] = "buffalo_s"

        settings = Settings()
        assert settings.preload is not None
-        assert settings.preload.clip == "ViT-B-32__openai"
-        assert settings.preload.facial_recognition == "buffalo_s"
+        assert settings.preload.clip.visual == "ViT-B-32__openai"
+        assert settings.preload.clip.textual == "ViT-B-32__openai"
+        assert settings.preload.facial_recognition.recognition == "buffalo_s"
+        assert settings.preload.facial_recognition.detection == "buffalo_s"

        model_cache = ModelCache()
        monkeypatch.setattr("app.main.model_cache", model_cache)