-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathint8_quantization.py
More file actions
74 lines (62 loc) · 2.99 KB
/
int8_quantization.py
File metadata and controls
74 lines (62 loc) · 2.99 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# Convert a Keras model to an INT8-quantized TFLite model
# -*- coding: utf-8 -*-
"""UNET_Attention_float32_model_conversion_unit8.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1GQcNk7Wj9jai-nAoR5lIRcYMMOjAwt8R
"""
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import tensorflow as tf
def dice_coef(y_true, y_pred, smooth=1e-6):
    """Compute the Dice coefficient between two tensors.

    Both inputs are flattened to 1-D before comparison, so any shape is
    accepted as long as the two tensors have the same number of elements.

    Args:
        y_true: ground-truth tensor.
        y_pred: predicted tensor (same element count as y_true).
        smooth: small constant guarding against division by zero.

    Returns:
        Scalar tensor: (2*overlap + smooth) / (sum_true + sum_pred + smooth).
    """
    flat_true = tf.reshape(y_true, [-1])
    flat_pred = tf.reshape(y_pred, [-1])
    overlap = tf.reduce_sum(flat_true * flat_pred)
    denom = tf.reduce_sum(flat_true) + tf.reduce_sum(flat_pred)
    return (2. * overlap + smooth) / (denom + smooth)
# Define the missing 'iou' function (Assuming it's Intersection over Union)
def iou(y_true, y_pred, smooth=1e-6):
    """Compute the Intersection-over-Union (Jaccard index) of two tensors.

    Args:
        y_true: ground-truth tensor.
        y_pred: predicted tensor.
        smooth: small constant guarding against division by zero.

    Returns:
        Scalar tensor: (intersection + smooth) / (union + smooth).
    """
    overlap = tf.reduce_sum(y_true * y_pred)
    combined = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - overlap
    return (overlap + smooth) / (combined + smooth)
# Load the Keras model. The saved metrics were registered under the
# names 'dice_coefficient' and 'iou', so both must be mapped to local
# callables for deserialization to succeed.
model = tf.keras.models.load_model(
    '/content/drive/MyDrive/model/best_attention_unet_edgetpu.keras',
    custom_objects={'dice_coefficient': dice_coef, 'iou': iou},
)

# Collect calibration images (PNG/TIFF, case-insensitive extension match)
# for the representative dataset used during quantization.
image_folder = "/content/drive/MyDrive/neck_pain/train_images"
image_paths = []
for fname in os.listdir(image_folder):
    if fname.lower().endswith((".png", ".tif")):
        image_paths.append(os.path.join(image_folder, fname))

# Spatial size (width, height) every calibration image is resized to;
# must match the model's expected input resolution.
INPUT_SIZE = (256, 256)
def representative_dataset_gen():
    """Yield calibration samples for post-training INT8 quantization.

    Each yielded item is a single-element list holding one float32 array
    of shape (1, H, W, 1): grayscale, resized to INPUT_SIZE, scaled to
    [0, 1] — matching the preprocessing the model was trained with.
    """
    for path in image_paths:
        gray = Image.open(path).convert('L')          # force single channel
        gray = gray.resize(INPUT_SIZE, Image.BILINEAR)
        sample = np.asarray(gray).astype(np.float32) / 255.0
        # Add batch (front) and channel (back) axes: (1, H, W, 1).
        sample = sample[np.newaxis, ..., np.newaxis]
        yield [sample]
# Configure the TFLite converter for post-training INT8 quantization.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen

# BUG FIX: the original assigned target_spec.supported_ops twice and
# inference_input_type/inference_output_type twice with conflicting
# values (tf.int8 then tf.uint8); only the final assignments ever took
# effect. State the effective configuration exactly once.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,
    # NOTE(review): SELECT_TF_OPS falls back to float TensorFlow kernels
    # for unsupported ops, so the result is not guaranteed to be fully
    # integer and will not compile for the Edge TPU. Remove this entry
    # if full INT8 is required — TODO confirm which ops need it.
    tf.lite.OpsSet.SELECT_TF_OPS,
]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
# Private flag; the MLIR converter is already the default in recent TF
# releases — kept only for parity with the original script.
converter._experimental_new_converter = True

# Run the conversion and write the quantized flatbuffer to Drive.
tflite_quant_model = converter.convert()
with open('/content/drive/MyDrive/model/model_int8_quant.tflite', 'wb') as f:
    f.write(tflite_quant_model)
print("INT8 quantized TFLite model saved as 'model_int8_quant.tflite'")