我正在尝试从图像中提取文本。所以,我使用
google_mlkit_text_recognition
和 camera
API。
问题是对于某些低端设备,UI 无法正确响应并且看起来很滞后,这不是一个良好的用户体验。我无法使用单独的
Isolate
来识别文本,因为 google_mlkit_text_recognition
必须在主隔离上运行。
所以我改为在
async
函数中完成这项工作,但在使用高分辨率相机配置的低端机型上,UI 仍然卡顿。
如果我将分辨率设置为
.medium
,它工作正常,但MLKit
检测性能很差。我正在寻找一些想法来微调这个。
我在下面分享我的代码。
import 'dart:async';
import 'dart:io';
import 'package:camera/camera.dart';
import 'package:flutter/material.dart';
import 'package:google_mlkit_text_recognition/google_mlkit_text_recognition.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:smartdevice/ui/scan_serial_number/serial_number_validator.dart';
/// Camera preview widget that periodically scans frames for text.
///
/// [onCapture] is intended to receive recognized text.
/// NOTE(review): the current state class never invokes [onCapture];
/// wire it up in `_scanImage` or remove the parameter.
class CameraView extends StatefulWidget {
  /// Callback for captured text. `final` because all fields of a
  /// StatefulWidget must be immutable (configuration lives in the widget,
  /// mutable state lives in the State object).
  final Function(String)? onCapture;

  const CameraView({super.key, this.onCapture});

  @override
  State<CameraView> createState() => _CameraViewState();
}
// Add the WidgetsBindingObserver mixin
// WidgetsBindingObserver gives us app lifecycle callbacks so the camera can
// be stopped/restarted when the app is backgrounded/resumed.
class _CameraViewState extends State<CameraView> with WidgetsBindingObserver {
  // Whether the user granted the camera permission.
  bool _isPermissionGranted = false;

  // ML Kit recognizer; must be closed in dispose() to free native resources.
  final _textRecognizer = TextRecognizer();

  // Re-entrancy guard: only one frame is captured/processed at a time.
  bool _isProcessingImage = false;

  // Completes when the permission request finishes; drives the outer
  // FutureBuilder in build().
  late final Future<void> _future;

  var result = '';

  // Periodic trigger for _scanImage(); cancelled in _stopCamera()/dispose().
  Timer? _timer;

  // Controller driving the camera preview and still captures.
  CameraController? _cameraController;

  @override
  void initState() {
    super.initState();
    WidgetsBinding.instance.addObserver(this);
    _future = _requestCameraPermission();
  }

  // Release everything once this widget leaves the tree.
  @override
  void dispose() {
    WidgetsBinding.instance.removeObserver(this);
    // Cancel the scan timer FIRST so no tick can fire against a controller
    // that _stopCamera() is about to dispose.
    _timer?.cancel();
    _stopCamera();
    _textRecognizer.close();
    super.dispose();
  }

  // Starts and stops the camera according to the lifecycle of the app.
  @override
  void didChangeAppLifecycleState(AppLifecycleState state) {
    final controller = _cameraController;
    if (controller == null || !controller.value.isInitialized) {
      return;
    }
    if (state == AppLifecycleState.inactive) {
      _stopCamera();
    } else if (state == AppLifecycleState.resumed) {
      // The original re-checked null/isInitialized here; both are already
      // guaranteed by the guard at the top of this method.
      _startCamera();
    }
  }

  @override
  Widget build(BuildContext context) {
    final screenSize = MediaQuery.of(context).size;
    return FutureBuilder(
      future: _future,
      builder: (context, snapshot) {
        return Stack(
          children: [
            // Show the camera feed behind everything.
            if (_isPermissionGranted)
              FutureBuilder<List<CameraDescription>>(
                // NOTE(review): availableCameras() creates a fresh future on
                // every rebuild; consider caching it in a field. Re-init is
                // harmless only because _initCameraController guards on a
                // non-null controller.
                future: availableCameras(),
                builder: (context, snapshot) {
                  if (!snapshot.hasData) {
                    return const CircularProgressIndicator();
                  }
                  _initCameraController(snapshot.data!);
                  final controller = _cameraController;
                  if (controller == null) {
                    return const CircularProgressIndicator();
                  }
                  return SizedBox(
                    width: screenSize.width,
                    height: screenSize.height,
                    child: CameraPreview(controller),
                  );
                },
              ),
          ],
        );
      },
    );
  }

  // Requests the runtime camera permission and records the outcome.
  Future<void> _requestCameraPermission() async {
    final status = await Permission.camera.request();
    _isPermissionGranted = status == PermissionStatus.granted;
  }

  // Selects the first rear-facing camera and starts it. Safe to call on
  // every rebuild: a no-op once a controller exists.
  void _initCameraController(List<CameraDescription> cameras) {
    if (_cameraController != null) {
      return;
    }
    for (final description in cameras) {
      if (description.lensDirection == CameraLensDirection.back) {
        _cameraSelected(description);
        break;
      }
    }
  }

  // Re-creates the controller for the previously selected camera (on resume).
  void _startCamera() {
    final controller = _cameraController;
    if (controller != null) {
      _cameraSelected(controller.description);
    }
  }

  void _stopCamera() {
    // Stop scanning before releasing the controller so a pending tick cannot
    // call takePicture() on a disposed controller.
    _timer?.cancel();
    _cameraController?.dispose();
    // NOTE(review): the reference is intentionally kept (not nulled) so
    // _startCamera() can reuse the camera description on resume; the preview
    // must not be used again until _cameraSelected() re-initializes it.
  }

  // Kicks off the 1-second periodic scan loop.
  Future<void> _listenToCameraStream() async {
    // Cancel any previous timer: the original leaked one periodic timer per
    // camera (re)start, stacking concurrent scan loops after each resume.
    _timer?.cancel();
    _timer = Timer.periodic(const Duration(seconds: 1), (timer) async {
      if (_cameraController != null && !_isProcessingImage) {
        await _scanImage();
      }
    });
  }

  // Creates and initializes a controller for [camera], then starts scanning.
  Future<void> _cameraSelected(CameraDescription camera) async {
    _cameraController = CameraController(
      camera,
      // NOTE(review): ResolutionPreset.max is the reported cause of UI jank
      // on low-end devices; consider .high, or a still-capture resolution
      // decoupled from the preview stream.
      ResolutionPreset.max,
      enableAudio: false,
      fps: 30,
    );
    await _cameraController!.initialize();
    if (!mounted) {
      return;
    }
    setState(() {});
    _listenToCameraStream();
  }

  // Takes one picture and runs ML Kit text recognition over it.
  Future<void> _scanImage() async {
    final controller = _cameraController;
    if (controller == null) return;
    _isProcessingImage = true;
    print('## Processing started.');
    try {
      final pictureFile = await controller.takePicture();
      final inputImage = InputImage.fromFile(File(pictureFile.path));
      final recognizedText = await _textRecognizer.processImage(inputImage);
      for (final text in recognizedText.text.split('\n')) {
        print('## $text');
        if (text.isValidSerialNumber) {
          print('## Found valid serial number $text');
        }
      }
    } catch (e) {
      print('## Error taking photos - ${e.toString()}');
    } finally {
      // Always clear the guard — the original reset it outside try/finally,
      // so an unexpected throw could wedge the scan loop permanently.
      print('## Processing ended.');
      _isProcessingImage = false;
    }
  }
}
publish_to: 'none'
version: 0.5.0

environment:
  sdk: 3.5.3

dependencies:
  bloc: 8.1.4
  equatable: 2.0.5
  flutter:
    sdk: flutter
  flutter_bloc: 8.1.6
  go_router: 14.2.0
  domain:
    path: domain
  data:
    path: data
  get_it: 7.7.0
  injectable: 2.4.2
  cached_network_image: 3.3.1
  lottie: 3.1.2
  flutter_markdown: 0.7.3
  url_launcher: 6.3.0
  flutter_localizations:
    sdk: flutter
  intl: 0.19.0
  youtube_player_iframe: 5.1.3
  device_info_plus: 9.1.2
  camera: 0.11.0
  permission_handler: ^11.3.1
  google_mlkit_text_recognition: ^0.13.1
  image: ^4.2.0

dev_dependencies:
  test: 1.25.7 # workaround to have the dependency in the sub packages
  flutter_test:
    sdk: flutter
  flutter_lints: 4.0.0
  mocktail: 1.0.4
  injectable_generator: 2.6.1
  # Tool for generating type-safe routes with go_router
  go_router_builder: 2.7.1
  build_runner: 2.4.11
  melos: 6.1.0
  bloc_test: 9.1.7

module:
  androidX: true
  androidPackage: com.liebherr.hau.smartdevice
  iosBundleIdentifier: com.liebherr.hau.smartdevice
有类似问题的朋友:
google_mlkit_text_recognition
使用MethodChannel
在后台调用Android和iOS的原生API进行图像处理。在这种情况下,我们不能使用 flutter Isolate
、compute
、method
来运行此图像处理或任何其他直接依赖于主隔离的插件。当您尝试在不同的 isolate
中执行 textRecognizer.processImage()
方法时,它会抛出 "Bad state" 错误(错误状态异常)。
However, code in a spawned isolate will generally not be able to interact with Flutter plugins. This is due to the tight integration between the platform plugin scaffolding and the main application isolate.
解决方案:可以使用以下
Flutter
包flutter_isolate。他们以某种方式处理了这个问题。在这种情况下,您可以与后台运行有问题的类似 flutter 插件进行通信。