BDEPEND=dev-python/numpy[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/onnx[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/onnxscript[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/optuna[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/pandas[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] >=dev-python/gpep517-15[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?]
DEFINED_PHASES=compile configure install prepare test unpack
DESCRIPTION=Olive: Simplify ML Model Finetuning, Conversion, Quantization, and Optimization for CPUs, GPUs and NPUs. [wheel]
EAPI=8
HOMEPAGE=https://microsoft.github.io/Olive/
INHERIT=python-r1 gs-pypi
IUSE=auto-opt azureml bnb capture-onnx-graph cpu directml docker finetune flash-attn gpu inc lora nvmo openvino optimum qnn shared-cache tf torch-tensorrt tune-session-params python_targets_python3_10 python_targets_python3_11 python_targets_python3_12
KEYWORDS=~amd64 ~x86
LICENSE=MIT
RDEPEND=dev-python/numpy[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/onnx[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/onnxscript[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/optuna[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?] dev-python/pandas[python_targets_python3_10(-)?,python_targets_python3_11(-)?,python_targets_python3_12(-)?]