diff --git a/backends/cortex_m/ops/TARGETS b/backends/cortex_m/ops/TARGETS
index 18b387c8ad2..12044266ca1 100644
--- a/backends/cortex_m/ops/TARGETS
+++ b/backends/cortex_m/ops/TARGETS
@@ -17,7 +17,6 @@ runtime.python_library(
     deps = [
         "fbcode//caffe2:torch",
         "//executorch/backends/cortex_m/passes:passes_utils",
-        "//executorch/backends/cortex_m/quantizer:quantization_configs",
     ],
 )
 
diff --git a/backends/cortex_m/quantizer/TARGETS b/backends/cortex_m/quantizer/TARGETS
deleted file mode 100644
index 0af105efef0..00000000000
--- a/backends/cortex_m/quantizer/TARGETS
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
-
-oncall("executorch")
-
-python_library(
-    name = "quantizer",
-    srcs = [
-        "__init__.py",
-        "operator_configs.py",
-        "quantization_configs.py",
-        "quantizer.py",
-    ],
-    deps = [
-        "//caffe2:torch",
-        "//executorch/backends/arm/quantizer:quantization_config",
-        "//pytorch/ao:torchao",
-    ],
-)
-
-python_library(
-    name = "quantization_configs",
-    srcs = [
-        "quantization_configs.py",
-    ],
-    deps = [
-        "//caffe2:torch",
-        "//executorch/backends/arm/quantizer:quantization_config",
-        "//pytorch/ao:torchao",
-    ],
-)
diff --git a/backends/cortex_m/quantizer/__init__.py b/backends/cortex_m/quantizer/__init__.py
deleted file mode 100644
index 39a3de431ff..00000000000
--- a/backends/cortex_m/quantizer/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .quantization_configs import (  # noqa
-    CMSIS_SOFTMAX_SCALE,
-    CMSIS_SOFTMAX_ZERO_POINT,
-    INT8_ACTIVATION_PER_CHANNEL_QSPEC,
-    INT8_ACTIVATION_PER_TENSOR_QSPEC,
-    INT8_PER_CHANNEL_CONFIG,
-    INT8_PER_TENSOR_CONFIG,
-    INT8_WEIGHT_PER_CHANNEL_QSPEC,
-    INT8_WEIGHT_PER_TENSOR_QSPEC,
-    SOFTMAX_OUTPUT_FIXED_QSPEC,
-    SOFTMAX_PER_TENSOR_CONFIG,
-)
-from .quantizer import CortexMQuantizer, SharedQspecQuantizer  # noqa