My TensorFlow Lite model produces very different outputs in Python and on Android, even though I provide the same input in both. On the Flutter side I use the tflite_flutter package with TensorFlow 2.11, and I tested with the same version (2.11) in Python, but the difference remains. I've read a lot of documentation and done plenty of research, but I still haven't been able to solve the problem.
Model input shape: (1, 80, 160, 3)
Model output shape: (1, 80, 160, 1)
I/flutter ( 7891): [Tensor{_tensor: Pointer: address=0x7d5388acff60, name: serving_default_input_1:0, type: float32, shape: [1, 80, 160, 3], data: 153600}]
I/flutter ( 7891): [Tensor{_tensor: Pointer: address=0x7d5388ad41e0, name: StatefulPartitionedCall:0, type: float32, shape: [1, 80, 160, 1], data: 51200}]
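As a sanity check, the byte counts in those logs match the declared shapes; a minimal check, assuming float32 tensors (4 bytes per element):

import numpy as np

# 1*80*160*3 float32 values -> 153600 bytes (input buffer)
assert np.prod([1, 80, 160, 3]) * 4 == 153600
# 1*80*160*1 float32 values -> 51200 bytes (output buffer)
assert np.prod([1, 80, 160, 1]) * 4 == 51200

So the shapes and dtypes the Android interpreter reports agree with the model, and the discrepancy must come from the values themselves.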
Python code:

import cv2
import numpy as np
import tensorflow as tf

class Lanes:
    def __init__(self):
        self.recent_fit = []
        self.avg_fit = []

def load_tflite_model(model_path):
    interpreter = tf.lite.Interpreter(model_path=model_path)
    print(tf.__version__)
    interpreter.allocate_tensors()
    print("Loaded TensorFlow Lite model successfully.")
    print(interpreter.get_input_details())
    print(interpreter.get_output_details())
    return interpreter

def predict_tflite(interpreter, input_data):
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    print("Input data:", input_data)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])
    return output_data

def road_lines(image, interpreter, lanes):
    h, w = image.shape[:2]
    image = image.copy()
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    small_img = cv2.resize(image, (160, 80), interpolation=cv2.INTER_LINEAR)
    small_img = np.array(small_img, dtype=np.float32)
    small_img = small_img[None, :, :, :]  # add batch dimension -> (1, 80, 160, 3)
    prediction = predict_tflite(interpreter, small_img)[0] * 255.0  # scale back to 0-255 if the output is normalized
    lanes.recent_fit.append(prediction)
    print("prediction", prediction)
    lanes.avg_fit = np.mean(np.array(lanes.recent_fit), axis=0)
    print("lanes recent fit:", lanes.recent_fit)
    print("-------------------------------------------------")
    lane_avg_uint8 = np.clip(lanes.avg_fit, 0, 255).astype(np.uint8)
    print("lane uint8", lane_avg_uint8)
    left_boundary = []
    right_boundary = []
    threshold_boundary = 128
    for y in range(lane_avg_uint8.shape[0]):
        row = lane_avg_uint8[y]  # shape (160, 1)
        indices = np.where(row > threshold_boundary)[0]
        if indices.size > 0:
            left_boundary.append((indices[0], y))
            right_boundary.append((indices[-1], y))
    print("left boundary", left_boundary)
    print("right boundary", right_boundary)
    scale_x = w / 160.0
    scale_y = h / 80.0
    left_boundary_scaled = [(int(x * scale_x), int(y * scale_y)) for (x, y) in left_boundary]
    right_boundary_scaled = [(int(x * scale_x), int(y * scale_y)) for (x, y) in right_boundary]
    print("left boundary scaled", left_boundary_scaled)
    print("right boundary scaled", right_boundary_scaled)
    if len(left_boundary_scaled) > 1:
        cv2.polylines(image, [np.array(left_boundary_scaled, dtype=np.int32)],
                      isClosed=False, color=(0, 255, 0), thickness=5)
    if len(right_boundary_scaled) > 1:
        cv2.polylines(image, [np.array(right_boundary_scaled, dtype=np.int32)],
                      isClosed=False, color=(0, 255, 0), thickness=5)
    left_bottom = left_boundary_scaled[-1] if left_boundary_scaled else None
    right_bottom = right_boundary_scaled[-1] if right_boundary_scaled else None
    print("Lane detection completed.")
    return image
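For completeness, this is roughly how the Python pipeline is driven; a minimal sketch, assuming frames come from a video file ("test_video.mp4" is a placeholder path, not my exact script):

interpreter = load_tflite_model("enet_model.tflite")
lanes = Lanes()
cap = cv2.VideoCapture("test_video.mp4")
while True:
    ret, frame = cap.read()  # BGR frame, as read by OpenCV
    if not ret:
        break
    annotated = road_lines(frame, interpreter, lanes)
    cv2.imshow("lanes", annotated)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()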
Dart code:

import 'package:tflite_flutter/tflite_flutter.dart';

import '../../Debug/logFile.dart';

class LaneDetectionModel with LogFile {
  Interpreter? _interpreter; // set by loadModel(); null until the asset loads

  // Output buffer matching the model's (1, 80, 160, 1) output shape.
  List<List<List<List<double>>>> output = List.generate(
      1,
      (index) => List.generate(
          80,
          (index) =>
              List.generate(160, (index) => List.generate(1, (index) => 0.0))));

  List<List<int>> leftLane = [];
  List<List<int>> rightLane = [];

  LaneDetectionModel() {
    loadModel();
  }

  InterpreterOptions interpreterOptions() {
    return InterpreterOptions();
  }

  Future<void> loadModel() async {
    try {
      _interpreter = await Interpreter.fromAsset(
        'assets/models/enet_model.tflite',
      );
      print(_interpreter!.getInputTensors());
      print(_interpreter!.getOutputTensors());
      createModelLog();
    } catch (e) {
      errorModelLog(e);
    }
  }

  Future<void> model_predict(List input_tensor) async {
    try {
      List reshape = input_tensor.reshape([1, 80, 160, 3]);
      _interpreter?.run(reshape, output);
      List<dynamic> outputTo8 = resultToLane();
      Map<String, List<dynamic>> den = extractBoundaries(
        output8: outputTo8,
        thresholdBoundary: 128,
        w: 243,
        h: 492,
      );
      print(den['left']);
      print(den['right']);
    } catch (e) {
      modelPredictErrorLog(e);
    }
  }

  ...
}
The model input data is the same on both the Python and the Android side.
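One way to verify that claim byte-for-byte is to dump the exact tensor each side feeds to the interpreter and diff the dumps; a minimal sketch in Python, assuming the Dart side writes its flattened input to a hypothetical dart_input.json:

import json
import numpy as np

def dump_input(small_img, path="python_input.json"):
    # Save the exact flattened tensor Python passes to set_tensor().
    with open(path, "w") as f:
        json.dump(small_img.astype(float).flatten().tolist(), f)

def compare_inputs(python_path="python_input.json", dart_path="dart_input.json"):
    with open(python_path) as f:
        a = np.array(json.load(f), dtype=np.float32)
    with open(dart_path) as f:
        b = np.array(json.load(f), dtype=np.float32)
    # Any nonzero difference means the two runtimes never saw the same input.
    print("max abs diff:", np.abs(a - b).max())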
I've added a screenshot below showing an example of where the small_img variable is used in the Python code.
Dart model output:
