
Commit

No public description
PiperOrigin-RevId: 646169145
Google AI Edge authored and junjiang-lab committed Jun 24, 2024
1 parent bb992dd commit a104fa6
Showing 52 changed files with 563 additions and 747 deletions.
@@ -7,7 +7,7 @@
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../tests/models")
 _TFLOpName = qtyping.TFLOperationName
 _OpExecutionMode = qtyping.OpExecutionMode
 _TensorQuantConfig = qtyping.TensorQuantizationConfig
@@ -16,7 +16,9 @@
 _QuantTransformation = qtyping.QuantTransformation
 _OpTestInfo = naive_min_max_test_utils.OpTestInfo
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _DEFAULT_ACTIVATION_QUANT_SETTING = (
     naive_min_max_test_utils.DEFAULT_ACTIVATION_QUANT_SETTING
 )
@@ -10,7 +10,9 @@
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _TFLOpName = qtyping.TFLOperationName
 _OpExecutionMode = qtyping.OpExecutionMode
 _TensorQuantConfig = qtyping.TensorQuantizationConfig
@@ -16,7 +16,9 @@
 _QuantTransformation = qtyping.QuantTransformation
 _OpTestInfo = naive_min_max_test_utils.OpTestInfo
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _DEFAULT_ACTIVATION_QUANT_SETTING = (
     naive_min_max_test_utils.DEFAULT_ACTIVATION_QUANT_SETTING
 )
@@ -10,8 +10,9 @@
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
-
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _TFLOpName = qtyping.TFLOperationName
 _OpExecutionMode = qtyping.OpExecutionMode
 _TensorQuantConfig = qtyping.TensorQuantizationConfig
@@ -16,7 +16,9 @@
 _QuantTransformation = qtyping.QuantTransformation
 _OpTestInfo = naive_min_max_test_utils.OpTestInfo
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _DEFAULT_ACTIVATION_QUANT_SETTING = (
     naive_min_max_test_utils.DEFAULT_ACTIVATION_QUANT_SETTING
 )
@@ -16,7 +16,9 @@
 _QuantTransformation = qtyping.QuantTransformation
 _OpTestInfo = naive_min_max_test_utils.OpTestInfo
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _DEFAULT_ACTIVATION_QUANT_SETTING = (
     naive_min_max_test_utils.DEFAULT_ACTIVATION_QUANT_SETTING
 )
@@ -16,7 +16,9 @@
 _QuantTransformation = qtyping.QuantTransformation
 _OpTestInfo = naive_min_max_test_utils.OpTestInfo
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile(
+    "../../../tests/models"
+)
 _DEFAULT_ACTIVATION_QUANT_SETTING = (
     naive_min_max_test_utils.DEFAULT_ACTIVATION_QUANT_SETTING
 )
@@ -9,7 +9,7 @@
 from ai_edge_quantizer.utils import test_utils
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 
-_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../test_models")
+_TEST_DATA_PREFIX_PATH = test_utils.get_path_to_datafile("../../tests/models")
 _TFLOpName = qtyping.TFLOperationName
 _TensorQuantConfig = qtyping.TensorQuantizationConfig
 
@@ -29,6 +29,7 @@
     _TFLOpName.CONV_2D,
     _TFLOpName.AVERAGE_POOL_2D,
     _TFLOpName.RESHAPE,
+    _TFLOpName.SOFTMAX,
 ])
 
 
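With SOFTMAX added to the supported-op set above, a quantization recipe can now be scoped to softmax ops. A minimal sketch, assuming the AlgorithmName enum lives in algorithm_manager (the tests alias it as _AlgorithmName) and that a default OpQuantizationConfig is acceptable here:

import numpy as np

from ai_edge_quantizer import algorithm_manager
from ai_edge_quantizer import qtyping
from ai_edge_quantizer import recipe_manager

rm = recipe_manager.RecipeManager()
# Scope a min/max uniform-quant config to the newly supported SOFTMAX op.
rm.add_quantization_config(
    regex=".*",  # match every scope in the model
    operation_name=qtyping.TFLOperationName.SOFTMAX,
    algorithm_key=algorithm_manager.AlgorithmName.MIN_MAX_UNIFORM_QUANT,
    op_config=qtyping.OpQuantizationConfig(),
)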
19 changes: 14 additions & 5 deletions ai_edge_quantizer/calibrator.py
@@ -1,7 +1,7 @@
 """Quantization Calibration."""
 
 from collections.abc import Iterable
-from typing import Any
+from typing import Any, Optional
 
 from absl import logging
 
@@ -11,6 +11,8 @@
 from ai_edge_quantizer.utils import tfl_flatbuffer_utils
 from ai_edge_quantizer.utils import tfl_interpreter_utils
 
+_SignatureInput = dict[str, Any]  # input_argument_name -> tensor_value.
+
 
 class Calibrator:
   """Calibrator for TFLite model."""
@@ -31,10 +33,11 @@ def __init__(
   # TODO(b/330740605)- Collect multiple QSVs in one run to save compute.
   def calibrate(
       self,
-      calibration_dataset: Iterable[Any],
+      calibration_dataset: Iterable[_SignatureInput],
       model_recipe_manager: recipe_manager.RecipeManager,
+      signature_key: Optional[str] = None,
   ) -> None:
-    """Calibrates the model with the given dataset.
+    """Calibrates the model using the given dataset for a model signature.
 
     The process is
     0. Initialize quantization statistics values (QSVs) using the initialization
@@ -50,9 +53,13 @@
     6. Start another round of calibration.
 
     Args:
-      calibration_dataset: A list of input data for calibration.
+      calibration_dataset: A list of input data for calibration for the given
+        model signature.
       model_recipe_manager: A RecipeManager object that contains the
         quantization recipe.
+      signature_key: the signature key to be used for invoking the models. If
+        the model doesn't have a signature key (or only has one), this can be
+        set to None.
     """
     op_codes = self._flatbuffer_model.operatorCodes
     if not self._model_qsvs:
@@ -68,7 +75,9 @@
     # TODO: b/329322226 - Enable parrallel calibration.
     for data in calibration_dataset:
       # Step1: run tfl interpreter to get tensor content.
-      tfl_interpreter_utils.invoke_interpreter_once(self._tfl_interpreter, data)
+      tfl_interpreter_utils.invoke_interpreter_signature(
+          self._tfl_interpreter, data, signature_key
+      )
       self._tensor_content_map = (
           tfl_interpreter_utils.get_tensor_name_to_content_map(
              self._tfl_interpreter
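Taken together, the new calibrate contract looks like the following sketch: each sample is a dict keyed by the signature's input-argument names (mirroring _SignatureInput), and signature_key selects which signature to invoke. The model path, input name, and signature key below are placeholders:

import numpy as np

from ai_edge_quantizer import calibrator
from ai_edge_quantizer import recipe_manager

calib = calibrator.Calibrator("/tmp/single_fc.tflite")  # placeholder model
recipe = recipe_manager.RecipeManager()
# ... populate the recipe with add_quantization_config(...) calls ...

# Each sample maps input_argument_name -> tensor_value.
dataset = [
    {"input_1": np.random.uniform(size=(1, 8)).astype(np.float32)}
    for _ in range(10)
]
calib.calibrate(dataset, recipe, signature_key="serving_default")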
8 changes: 4 additions & 4 deletions ai_edge_quantizer/calibrator_test.py
@@ -26,13 +26,13 @@ def _representative_dataset_gen(size=(1, 8), num_samples=10):
         TEST_MIN_VAL,
         TEST_MAX_VAL,
     )  # fix min/max for testing
-    yield [vals]
+    yield {"input_1": vals}
 
 
 def _add_default_int8xint8_integer_recipe(recipe_manager_object):
   recipe_manager_object.add_quantization_config(
       regex=".*",
-      operation_name=qtyping.TFLOperationName.ALL,
+      operation_name=qtyping.TFLOperationName.ALL_SUPPORTED,
       algorithm_key=_AlgorithmName.MIN_MAX_UNIFORM_QUANT,
       op_config=qtyping.OpQuantizationConfig(
           activation_tensor_config=_TENSOR_QUANT_CONFIG(
@@ -50,7 +50,7 @@ def setUp(self):
     super().setUp()
     np.random.seed(0)
     self._test_model_path = os.path.join(
-        TEST_DATA_PREFIX_PATH, "test_models/single_fc.tflite"
+        TEST_DATA_PREFIX_PATH, "tests/models/single_fc.tflite"
     )
     self._calibrator = calibrator.Calibrator(self._test_model_path)
     self._recipe_manager = recipe_manager.RecipeManager()
@@ -163,7 +163,7 @@ def test_calibrate_single_fc_success(self):
   def test_calibrate_unsupported_ops_fails(self):
     # Many ops in the following model are not supported currently.
     test_model_path = os.path.join(
-        TEST_DATA_PREFIX_PATH, "test_models/branching_conv_fc.tflite"
+        TEST_DATA_PREFIX_PATH, "tests/models/branching_conv_fc.tflite"
     )
     test_calibrator = calibrator.Calibrator(test_model_path)
 
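Beyond the dict-keyed dataset, note the enum change in the test recipe: wildcard recipes now use ALL_SUPPORTED rather than ALL, presumably renamed elsewhere among this commit's 52 files. Callers would update accordingly, per this hedged before/after sketch:

from ai_edge_quantizer import qtyping

# Before this commit (no longer valid): qtyping.TFLOperationName.ALL
op_name = qtyping.TFLOperationName.ALL_SUPPORTED  # matches every supported op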
6 changes: 1 addition & 5 deletions ai_edge_quantizer/examples/mnist_toy_model.py
@@ -2,10 +2,6 @@
 This script quantizes the float mnist toy model and runs inference on a sample
 mnist image.
 
-blaze run
-//third_party/odml/model_customization/quantization/examples:mnist_toy_model
---float_model_path=/tmp/conv_fc_mnist.tflite --img_path=/tmp/sample6.png
-
 """
 
 import os
@@ -28,7 +24,7 @@
 
 _FLOAT_MODEL_PATH = flags.DEFINE_string(
     'float_model_path',
-    test_utils.get_path_to_datafile('../test_models/conv_fc_mnist.tflite'),
+    test_utils.get_path_to_datafile('../tests/models/conv_fc_mnist.tflite'),
     'The trained float MNIST toy model path.',
 )
 _IMG_PATH = flags.DEFINE_string(
10 changes: 5 additions & 5 deletions ai_edge_quantizer/model_modifier.py
@@ -1,6 +1,7 @@
 """Model Modifier class that produce the final quantized TFlite model."""
 
 import copy
+from typing import Union
 
 import numpy as np
 
@@ -16,17 +17,16 @@ class ModelModifier:
   """Model Modifier class that produce the final quantized TFlite model."""
 
   # TODO(b/336599483): support byte array as input
-  def __init__(self, float_tflite_path: str):
+  def __init__(self, float_tflite: Union[str, bytearray]):
     """Constructor.
 
     Args:
-      float_tflite_path: the path to the original TFlite models
+      float_tflite: the original TFlite model in bytearray or file path
     """
-    self._float_tflite_path = float_tflite_path
-    self._flatbuffer_model = tfl_flatbuffer_utils.read_model(float_tflite_path)
+    self._flatbuffer_model = tfl_flatbuffer_utils.read_model(float_tflite)
     self._constant_map = []
     self._transformation_instruction_generator = transformation_instruction_generator.TransformationInstructionsGenerator(
-        self._float_tflite_path
+        float_tflite
     )
     self._transformation_performer = (
         transformation_performer.TransformationPerformer()
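With the Union[str, bytearray] constructor, ModelModifier can now be fed either a file path or the raw flatbuffer bytes. A minimal sketch with a placeholder path:

from ai_edge_quantizer import model_modifier

# From a file path, as before.
modifier = model_modifier.ModelModifier("/tmp/conv_fc_mnist.tflite")

# Or from the model bytes, new in this commit.
with open("/tmp/conv_fc_mnist.tflite", "rb") as f:
  model_bytes = bytearray(f.read())
modifier = model_modifier.ModelModifier(model_bytes)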
2 changes: 1 addition & 1 deletion ai_edge_quantizer/model_modifier_test.py
@@ -19,7 +19,7 @@ class ModelModifierTest(parameterized.TestCase):
   def setUp(self):
     super().setUp()
     self._model_path = os.path.join(
-        TEST_DATA_PREFIX_PATH, 'test_models/conv_fc_mnist.tflite'
+        TEST_DATA_PREFIX_PATH, 'tests/models/conv_fc_mnist.tflite'
     )
     self._model_modifier = model_modifier.ModelModifier(self._model_path)
     self._model_buffer: bytearray = tfl_flatbuffer_utils.get_model_buffer(
53 changes: 31 additions & 22 deletions ai_edge_quantizer/model_validator.py
@@ -13,48 +13,57 @@
 # TODO(b/331655892): have this function automatically detect the input tensor
 # type
 def compare_model(
-    reference_model_path: str,
-    target_model_path: str,
-    dataset: Iterable[Any],
-    quantize_target_input: bool,
+    reference_model: Union[str, bytearray],
+    target_model: Union[str, bytearray],
+    signature_dataset: Iterable[dict[str, Any]],
     compare_fn: Callable[[Any, Any], float],
+    signature_key: str | None = None,
+    quantize_target_input: bool = True,
 ) -> dict[str, float]:
-  """Produces comparison of all intermediate tensors given 2 models and a compare_fn.
+  """Compares model tensors over a model signature using the compare_fn.
 
-  This function returns a per-tensor mean difference comparison across all
-  inputs in the dataset, which will be returned at the end of this function
+  This function will run the model signature on the provided dataset over and
+  compare all the tensors (cached) using the compare_fn (e.g., mean square
+  error).
 
   Args:
-    reference_model_path: path to the model which will be used as the reference
-    target_model_path: path to the model which we're interested in the output,
-      we expect reference_model and target_model have the inputs and outputs
-    dataset: A list of input dataset to be run on reference and target models
-    quantize_target_input: indicating whether the target requires quantized
-      input
+    reference_model: Model which will be used as the reference
+    target_model: Target model which will be compared against the reference.
+      We expect reference_model and target_model have the inputs and outputs
+    signature_dataset: A list of inputs of the signature to be run on reference
+      and target models.
     compare_fn: a comparison function to be used for calculating the statistics,
       this function must be taking in two ArrayLike strcuture and output a
-      single float value
+      single float value.
+    signature_key: the signature key to be used for invoking the models. If the
+      model doesn't have a signature key, this can be set to None.
+    quantize_target_input: indicating whether the target requires quantized
+      input.
 
   Returns:
     a dictionary of tensor name and a single float value representing
     the differences
   """
   reference_interpreter = tfl_interpreter_utils.create_tfl_interpreter(
-      reference_model_path
+      reference_model
   )
   target_interpreter = tfl_interpreter_utils.create_tfl_interpreter(
-      target_model_path
+      target_model
   )
   comparison_results = {}
 
-  # TODO(b/330797129): enable multi-threaded evaluation
-  for data in dataset:
-    tfl_interpreter_utils.invoke_interpreter_once(
-        reference_interpreter, data, False
+  # TODO(b/330797129): enable multi-threaded evaluation.
+  for signature_input in signature_dataset:
+    tfl_interpreter_utils.invoke_interpreter_signature(
+        reference_interpreter, signature_input, signature_key
     )
-    tfl_interpreter_utils.invoke_interpreter_once(
-        target_interpreter, data, quantize_target_input
+    tfl_interpreter_utils.invoke_interpreter_signature(
+        target_interpreter,
+        signature_input,
+        signature_key,
+        quantize_input=quantize_target_input,
    )
 
    reference_name_to_details = (
        tfl_interpreter_utils.get_tensor_name_to_details_map(
            reference_interpreter
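A sketch of calling the reworked compare_model: models may be paths or bytes, the dataset is keyed by signature input names, and compare_fn reduces each reference/target tensor pair to one float. The MSE helper, file paths, input name, and signature key below are illustrative:

import numpy as np

from ai_edge_quantizer import model_validator

def mse(reference, target) -> float:
  # Mean squared error between two ArrayLike tensors.
  ref = np.asarray(reference, dtype=np.float32)
  tgt = np.asarray(target, dtype=np.float32)
  return float(np.mean((ref - tgt) ** 2))

signature_dataset = [
    {"input_1": np.random.uniform(size=(1, 8)).astype(np.float32)}
    for _ in range(4)
]
results = model_validator.compare_model(
    "/tmp/float_model.tflite",      # reference (path or bytearray)
    "/tmp/quantized_model.tflite",  # target (path or bytearray)
    signature_dataset,
    compare_fn=mse,
    signature_key="serving_default",  # None if the model has one signature
    quantize_target_input=True,
)
for tensor_name, mean_diff in results.items():
  print(tensor_name, mean_diff)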
(Diffs for the remaining changed files are omitted.)