Merge pull request #3170 from alibaba/feature/sync
MNN:Sync: Sync Internal 3.0.4
jxt1234 authored Jan 22, 2025
2 parents 9889cc1 + 7668152 commit b23b55b
Showing 248 changed files with 32,828 additions and 20,039 deletions.
34 changes: 20 additions & 14 deletions CMakeLists.txt
@@ -53,7 +53,6 @@ option(MNN_DEBUG_TENSOR_SIZE "Enable Tensor Size" OFF)
option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF)
option(MNN_SUPPORT_RENDER "Enable MNN Render Ops" OFF)
option(MNN_SUPPORT_TRANSFORMER_FUSE "Enable MNN transformer Fuse Ops" OFF)
-option(MNN_PORTABLE_BUILD "Link the static version of third party libraries where possible to improve the portability of built executables" OFF)
option(MNN_SEP_BUILD "Build MNN Backends and expression separately. Only works with MNN_BUILD_SHARED_LIBS=ON" ON)
option(NATIVE_LIBRARY_OUTPUT "Native Library Path" OFF)
option(NATIVE_INCLUDE_OUTPUT "Native Include Path" OFF)
@@ -174,9 +173,6 @@ ENDIF()
IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT MNN_BUILD_SHARED_LIBS AND NOT (MSVC OR WIN32))
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
SET(MNN_SEP_BUILD OFF CACHE BOOL "<docstring>" FORCE)
-IF(MNN_BUILD_CONVERTER)
-  SET(MNN_PORTABLE_BUILD ON CACHE BOOL "<docstring>" FORCE)
-ENDIF()
ENDIF()

if(MNN_FORBID_MULTI_THREAD)
@@ -515,6 +511,7 @@ endif()
if ((NOT MSVC) AND MNN_HIDDEN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fvisibility=hidden")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -fvisibility=hidden")
# Omitting the frame pointer may make debugging difficult
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer")
endif()
@@ -701,6 +698,16 @@ IF(MNN_TENSORRT)
list(APPEND MNN_EXTRA_DEPENDS ${MNN_TRT_LIBS})
ENDIF()

IF(MNN_BUILD_OPENCV)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/cv)
IF(MNN_SEP_BUILD)
list(APPEND MNN_DEPS MNNOpenCV)
ELSE()
list(APPEND MNN_TARGETS MNNOpenCV)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNOpenCV>)
ENDIF()
ENDIF()

IF(MNN_BUILD_LLM)
# add_definitions(-DMNN_BUILD_LLM)
include(${CMAKE_CURRENT_LIST_DIR}/transformers/llm/engine/CMakeLists.txt)
@@ -709,6 +716,13 @@ IF(MNN_BUILD_LLM)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:llm>)
ENDIF()
ENDIF()
IF(MNN_BUILD_DIFFUSION AND MNN_BUILD_OPENCV AND MNN_IMGCODECS)
include(${CMAKE_CURRENT_LIST_DIR}/transformers/diffusion/engine/CMakeLists.txt)
IF(NOT MNN_SEP_BUILD)
list(APPEND MNN_TARGETS diffusion)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:diffusion>)
ENDIF()
ENDIF()

IF(MNN_SEP_BUILD)
add_library(MNN SHARED ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS} ${MNN_EXTRA_HEADERS})
@@ -774,13 +788,7 @@ IF(WIN32 AND MNN_BUILD_CONVERTER AND MNN_BUILD_SHARED_LIBS)
target_link_libraries(MNN PUBLIC ${Protobuf_LIBRARIES})
ENDIF()
# Merge MNN/MNNExpress/MNNOpenCV and other backends into one .lib/.dll on Windows
-add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/cv)
-IF(MNN_BUILD_OPENCV AND NOT MNN_SEP_BUILD)
-  IF(MSVC)
-    target_compile_definitions(MNNOpenCV PRIVATE "-DBUILDING_MNN_DLL" INTERFACE "-DUSING_MNN_DLL")
-  ENDIF()
-  target_sources(MNN PRIVATE $<TARGET_OBJECTS:MNNOpenCV>)
-ENDIF()

add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/audio)
IF(MNN_BUILD_AUDIO AND NOT MNN_SEP_BUILD)
IF(MSVC)
@@ -820,9 +828,7 @@ list(REMOVE_ITEM MNN_TARGETS MNN)
IF(MNN_BUILD_DEMO)
include(${CMAKE_CURRENT_LIST_DIR}/demo/exec/CMakeLists.txt)
ENDIF()
-IF(MNN_BUILD_DIFFUSION AND MNN_BUILD_OPENCV AND MNN_IMGCODECS)
-  include(${CMAKE_CURRENT_LIST_DIR}/transformers/diffusion/CMakeLists.txt)
-ENDIF()

IF(MNN_BUILD_TOOLS)
include(${CMAKE_CURRENT_LIST_DIR}/tools/cpp/CMakeLists.txt)
ENDIF()
1 change: 0 additions & 1 deletion docs/compile/cmake.md
@@ -20,7 +20,6 @@ MNN uses CMake to build the project; the available CMake macro definitions are listed below:
| MNN_DEBUG_MEMORY | Whether to enable MNN memory debugging; defaults to `OFF` |
| MNN_DEBUG_TENSOR_SIZE | Whether to enable MNN tensor-size debugging; defaults to `OFF` |
| MNN_GPU_TRACE | Whether to enable MNN GPU debugging; defaults to `OFF` |
-| MNN_PORTABLE_BUILD | Link the static versions of third-party libraries where possible, to improve the portability of built executables; defaults to `OFF` |
| MNN_SEP_BUILD | Whether to build MNN's backends and expression module separately; only effective when `MNN_BUILD_SHARED_LIBS=ON`; defaults to `ON` |
| NATIVE_LIBRARY_OUTPUT | If building as a shared library, the output path of the shared library; defaults to `OFF` |
| NATIVE_INCLUDE_OUTPUT | If building as a shared library, the output path of its headers; defaults to `OFF` |
17 changes: 17 additions & 0 deletions docs/contribute/backend.md
@@ -179,6 +179,23 @@ virtual void onExecuteEnd() const = 0;
For models that use the same backend and run in sequence (never simultaneously), MNN provides a mechanism for them to share part of the computing resources, such as the thread pool and memory pool.
These resources are stored in a Runtime, and Backends are created by the Runtime.

### CompileType

A Runtime can specify a CompileType to decide whether MNN skips the geometry-compute step:

```
enum CompilerType {
    // Run geometry compute partially: decompose shape-transform ops, but leave ops such as BatchMatMul / Gather intact
    Compiler_Geometry = 0,
    // Skip the geometry-compute step entirely and use the original ops directly
    Compiler_Origin = 1,
    // Run geometry compute fully; only in this mode can unsupported ops automatically fall back to the CPU
    Compiler_Loop = 2,
};
```
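
For backend implementers, a rough sketch of how a custom Runtime could select its compile type (this assumes the `onGetCompilerType` virtual that accompanies this enum on `MNN::Runtime`; all other required overrides are elided):

```cpp
#include "core/Backend.hpp" // MNN::Runtime and the CompilerType enum

// Hypothetical runtime for a custom backend; illustrative only.
class MyNPURuntime : public MNN::Runtime {
public:
    // Run geometry compute fully, so ops this backend lacks can fall back to CPU.
    CompilerType onGetCompilerType() const override {
        return Compiler_Loop;
    }
    // ... onCreate and the remaining pure-virtual Runtime methods elided ...
};
```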

### Implementing a Runtime
A Runtime mainly implements the following interfaces:

2 changes: 1 addition & 1 deletion docs/index.rst
@@ -42,6 +42,7 @@
inference/session
inference/module
inference/python
inference/npu

.. toctree::
:maxdepth: 1
@@ -81,7 +82,6 @@
tools/compress
tools/visual
tools/python
-tools/script

.. toctree::
:maxdepth: 1
1 change: 0 additions & 1 deletion docs/inference/module.md
@@ -92,7 +92,6 @@ rtmgr->setMode(Interpreter::Session_Debug);
- Interpreter::HintMode::WINOGRAD_MEMORY_LEVEL: memory-usage preference when optimizing convolutions with the Winograd algorithm; defaults to 3. Set it to 0 to reduce memory usage.
- Interpreter::HintMode::GEOMETRY_COMPUTE_MASK: switches for geometry-compute optimizations; 1 enables region merging, 2 compound-region merging, 4 the loop op, and 8 geometry recompute. To enable several features, add the corresponding values. All features are enabled by default.
- Interpreter::HintMode::DYNAMIC_QUANT_OPTIONS: dynamic-quantization option; 1 for per-batch, 2 for per-tensor. Defaults to 2.
-- Interpreter::HintMode::CPU_LITTLECORE_DECREASE_RATE: on Android devices with big/medium/little cores, the rate at which medium-core compute power falls off relative to the big cores. Defaults to 50 (medium cores deliver 50% of big-core performance).
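
For reference, a small sketch of setting such hints (this assumes the `RuntimeManager::setHint` interface described by the surrounding module API doc; the values follow the list above):

```cpp
// rtmgr: the MNN::Express::Executor::RuntimeManager from the earlier examples.
rtmgr->setHint(MNN::Interpreter::DYNAMIC_QUANT_OPTIONS, 2);     // per-tensor dynamic quantization
rtmgr->setHint(MNN::Interpreter::GEOMETRY_COMPUTE_MASK, 1 | 2); // region merging + compound-region merging only
```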
57 changes: 57 additions & 0 deletions docs/inference/npu.md
@@ -0,0 +1,57 @@
# NPU and Related Backend Usage Notes

MNN currently supports invoking the NPU of some phones through the following backends:
- CoreML
- NNAPI
- HIAI

None of the NPU backends currently supports dynamic models (variable shapes, control flow, etc.), and far fewer ops are covered than on CPU/GPU; it is recommended to adjust the model structure iteratively according to whether it runs successfully on the NPU.

## CoreML
Applicable to Mac / iOS / iPad

### Building the CoreML backend
1. Enable the MNN_COREML build macro when compiling MNN: -DMNN_COREML=ON
2. When building the app / executable, additionally link CoreML.framework

### Using the CoreML backend
Set the backend type to MNN_FORWARD_NN
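
As an illustrative sketch, selecting this backend through the session API looks roughly as follows (`model.mnn` is a placeholder path; MNN_FORWARD_NN maps to CoreML on Apple platforms and to NNAPI on Android):

```cpp
#include <MNN/Interpreter.hpp>

auto net = MNN::Interpreter::createFromFile("model.mnn"); // placeholder model path
MNN::ScheduleConfig conf;
conf.type = MNN_FORWARD_NN; // CoreML here; unsupported ops fall back per backend rules
auto session = net->createSession(conf);
```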

## NNAPI
Applicable to Android systems with Qualcomm / MediaTek chips

### Building the NNAPI backend
Just enable the MNN_NNAPI build macro:
```
cd ${MNN}
cd project/android
mkdir build && cd build
../build_64.sh -DMNN_USE_LOGCAT=ON -DMNN_NNAPI=ON
```

### Using the NNAPI backend
Set the backend type to MNN_FORWARD_NN (the CoreML sketch above applies unchanged)


## Huawei HIAI
Applicable to Android systems with Kirin chips

### HIAI environment setup
1. Download the DDK from:
https://developer.huawei.com/consumer/cn/doc/hiai-Library/ddk-download-0000001053590180

2. Copy the corresponding .so and include files into the hiai/3rdParty directory, creating it first if it does not exist:

```
mkdir ${MNN}/source/backend/hiai/3rdParty
cp -r ${DDK}/lib ${MNN}/source/backend/hiai/3rdParty/armeabi-v7a
cp -r ${DDK}/lib64 ${MNN}/source/backend/hiai/3rdParty/arm64-v8a
cp -r ${DDK}/include ${MNN}/source/backend/hiai/3rdParty/include
```

### Building and running HIAI
1. Enable the NPU switch in the cmake arguments: -DMNN_NPU=true
2. Set the backend type to MNN_FORWARD_USER_0
3. Run the executable (these libraries must be dynamically loadable: libMNN_NPU.so, libhiai_ir_build.so, libhiai_ir.so, libhiai.so)
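
A minimal selection sketch, mirroring the CoreML example above (`net` is the `Interpreter` created there):

```cpp
MNN::ScheduleConfig conf;
conf.type = MNN_FORWARD_USER_0; // HIAI NPU backend
auto session = net->createSession(conf);
```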
4 changes: 2 additions & 2 deletions docs/inference/session.md
@@ -302,7 +302,7 @@ delete nhwcTensor;
With this copy-based approach, users only need to care about the data layout of the tensor they created themselves; `copyFromHostTensor` takes care of layout conversion (if needed) and data copies between backends (if needed).
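
For reference, a short end-to-end sketch of this recommended path (the NCHW layout and the fill value are illustrative):

```cpp
auto inputTensor = interpreter->getSessionInput(session, NULL);
// Host tensor in the layout we want to write (NCHW, i.e. Tensor::CAFFE).
auto nchwTensor = new MNN::Tensor(inputTensor, MNN::Tensor::CAFFE);
for (int i = 0; i < nchwTensor->elementSize(); ++i) {
    nchwTensor->host<float>()[i] = 1.0f; // illustrative fill
}
inputTensor->copyFromHostTensor(nchwTensor); // layout and backend conversion happen here
delete nchwTensor;
```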


-### [Not recommended] Filling data directly
+### [Deprecated] Filling data directly
```cpp
auto inputTensor = interpreter->getSessionInput(session, NULL);
inputTensor->host<float>()[0] = 1.f;
@@ -594,7 +594,7 @@ delete nhwcTensor;
-### [Not recommended] Reading data directly
+### [Deprecated] Reading data directly
**Since the vast majority of users are not familiar with MNN's underlying data layout, do not use this approach!!!**
```cpp
auto outputTensor = interpreter->getSessionOutput(session, NULL);
39 changes: 38 additions & 1 deletion docs/tools/compress.md
@@ -69,7 +69,7 @@ The MNN model-compression toolkit provides compression algorithms including low-rank decomposition, pruning, and quantization
- Dynamic quantization
Dynamic-quantization support in the MNN runtime can be enabled as follows, letting core ops such as convolution in weight-quantized models run with quantized computation, reducing memory usage and improving performance.

-1. Compile MNN with the MNN_LOW_MEMORY build macro enabled (adds dynamic-quantization support)
+1. Compile MNN with the `MNN_LOW_MEMORY` build macro enabled (adds dynamic-quantization support)
```
cmake .. -DMNN_LOW_MEMORY=ON
```
@@ -99,6 +99,43 @@ backendConfig.precision = BackendConfig::Precision_Low;
config.backendConfig = &backendConfig;
```
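
Pieced together, a minimal runtime configuration for this mode could look as follows (pairing `Memory_Low` with the low-precision setting is an assumption about intended usage, not something this diff states):

```cpp
MNN::ScheduleConfig config;
MNN::BackendConfig backendConfig;
backendConfig.memory = MNN::BackendConfig::Memory_Low;       // prefer quantized kernels
backendConfig.precision = MNN::BackendConfig::Precision_Low; // as in the doc context above
config.backendConfig = &backendConfig;
```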

### Automatic compression tool
The script `tools/converter/tools/auto_quant.py` can pick a quantization scheme automatically based on test error. Usage:

1. Convert the model to MNN format, for example:
```
./MNNConvert -f ONNX --modelFile src.onnx --MNNModel float.mnn
```

2. Following [correctness validation](convert.html#id3), build a test folder mnntest

3. Run the script from the directory where MNNConvert was built
```
# Show the arguments
python ../tools/converter/tools/auto_quant.py -h
# Compress; produces quant.mnn and its parameter file quant.mnn.json
python ../tools/converter/tools/auto_quant.py --model float.mnn --quant_model quant.mnn --test_dir mnntest --rate 0.05
```

### Customizing a scheme
If the default compression scheme cannot meet the accuracy requirement, a custom scheme can be built as follows. In general, skipping compression for a few important ops mitigates the accuracy drop.

1. Generate the model-compression information file user.json:
```
rm user.json
./MNNConvert -f ONNX --modelFile src.onnx --MNNModel dst.mnn --weightQuantBits 8 --compressionParamsFile user.json
```

2. Edit user.json: for ops that should not be quantized, set their bit width to 0, or raise it (no higher than 8). You can also batch-edit the compression information along the lines of the script `tools/converter/tools/user_quant_modify_demo.py`, then run it:

```
python3 ../tools/converter/tools/user_quant_modify_demo.py user.json user.json
```

3. Re-convert the model using user.json:
```
./MNNConvert -f ONNX --modelFile src.onnx --MNNModel dst.mnn --compressionParamsFile user.json
```

## Offline quantization tool
### Installing the offline quantization tool
5 changes: 2 additions & 3 deletions docs/tools/convert.md
@@ -47,7 +47,7 @@ Usage:
--weightQuantAsymmetric Used together with weightQuantBits to decide whether asymmetric quantization is used; defaults to `true`

--compressionParamsFile arg
-Generate the quantized model from a model-compression information file produced by the MNN compression toolkit, or from user-provided quantization parameters (see tools/converter/user_provide_quant_params.json for the format)
+Generate the quantized model from a model-compression information file produced by the MNN compression toolkit, or from user-provided quantization parameters (see tools/converter/user_provide_quant_params.json for the format). If the file does not exist and a quantization option such as weightQuantBits is enabled, a model-compression information file (JSON) is generated at that path and can be edited afterwards

--saveStaticModel Fix the input shapes and save a static model, default: false

@@ -74,8 +74,7 @@ Usage:
--alignDenormalizedValue arg
Allowed values: {0, 1}; defaults to 1. Floats with `|x| < 1.18e-38` (denormals) are treated as 0

---detectSparseSpeedUp arg
-Allowed values: {0, 1}; defaults to 1. Detects whether the weights can use sparse acceleration
+--detectSparseSpeedUp Detect whether the weights can use sparse acceleration/compression; may reduce model size but increases model-conversion time

--saveExternalData Store weights, constants, and other data in a separate file; defaults to 0, i.e. `false`

70 changes: 0 additions & 70 deletions docs/tools/script.md

This file was deleted.
