{
  "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg.schema.json",
  "name": "openvino",
  "version": "2024.4.0",
  "maintainers": "OpenVINO Developers <openvino@intel.com>",
  "summary": "This is a port for Open Visual Inference And Optimization toolkit for AI inference",
  "description": [
    "Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing ",
    "and deploying AI inference. It can be used to develop applications and solutions based ",
    "on deep learning tasks, such as: emulation of human vision, automatic speech recognition, ",
    "natural language processing, recommendation systems, etc. It provides high-performance ",
    "and rich deployment options, from edge to cloud"
  ],
  "homepage": "https://github.com/openvinotoolkit/openvino",
  "documentation": "https://docs.openvino.ai/latest/index.html",
  "license": "Apache-2.0",
  "supports": "!uwp & !x86 & !(android & arm32)",
  "dependencies": [
    {
      "name": "pkgconf",
      "host": true
    },
    "pugixml",
    {
      "name": "tbb",
      "version>=": "2021.10.0#2"
    },
    {
      "name": "vcpkg-cmake",
      "host": true
    },
    {
      "name": "vcpkg-cmake-config",
      "host": true
    },
    {
      "name": "vcpkg-get-python-packages",
      "host": true
    },
    {
      "name": "xbyak",
      "platform": "!(arm | uwp)",
      "version>=": "6.69"
    }
  ],
  "default-features": [
    "auto",
    "auto-batch",
    {
      "name": "cpu",
      "platform": "!(windows & arm)"
    },
    {
      "name": "gpu",
      "platform": "x64 & !(osx | uwp)"
    },
    "hetero",
    "ir",
    "onnx",
    "paddle",
    "pytorch",
    "tensorflow",
    "tensorflow-lite"
  ],
  "features": {
    "auto": {
      "description": "Enables Auto plugin for inference"
    },
    "auto-batch": {
      "description": "Enables Auto Batch plugin for inference, useful for throughput mode"
    },
    "cpu": {
      "description": "Enables CPU plugin for inference",
      "supports": "!(windows & arm)"
    },
    "gpu": {
      "description": "Enables GPU plugin for inference",
      "supports": "x64 & !(osx | uwp)",
      "dependencies": [
        "opencl",
        "rapidjson"
      ]
    },
    "hetero": {
      "description": "Enables Hetero plugin for inference"
    },
    "ir": {
      "description": "Enables IR frontend for reading models in OpenVINO IR format"
    },
    "npu": {
      "description": "NPU Support",
      "supports": "x64 & !(osx | uwp) & !static"
    },
    "onnx": {
      "description": "Enables ONNX frontend for reading models in ONNX format",
      "dependencies": [
        {
          "name": "onnx",
          "version>=": "1.13.1"
        },
        {
          "name": "protobuf",
          "version>=": "3.21.2"
        },
        {
          "name": "protobuf",
          "host": true,
          "version>=": "3.21.2"
        }
      ]
    },
    "paddle": {
      "description": "Enables PaddlePaddle frontend for reading models in PaddlePaddle format",
      "dependencies": [
        {
          "name": "protobuf",
          "version>=": "3.21.2"
        },
        {
          "name": "protobuf",
          "host": true,
          "version>=": "3.21.2"
        }
      ]
    },
    "pytorch": {
      "description": "Enables PyTorch frontend to convert models in PyTorch format"
    },
    "tensorflow": {
      "description": "Enables TensorFlow frontend for reading models in TensorFlow format",
      "dependencies": [
        {
          "name": "protobuf",
          "version>=": "3.21.2"
        },
        {
          "name": "protobuf",
          "host": true,
          "version>=": "3.21.2"
        },
        "snappy"
      ]
    },
    "tensorflow-lite": {
      "description": "Enables TensorFlow Lite frontend for reading models in TensorFlow Lite format",
      "dependencies": [
        {
          "name": "flatbuffers",
          "version>=": "2.0.6"
        },
        {
          "name": "flatbuffers",
          "host": true,
          "version>=": "2.0.6"
        }
      ]
    }
  }
}