我正在使用 React-Native 为 Android 开发一个应用程序。该应用程序应该使用 Google 的 ML Kit PoseDetection API 来计算锻炼的次数。现在我只想让应用程序打开相机,对我使用检测并在相机图像上画线,以便我知道姿势检测有效。我使用了medium.com的以下文章作为模型:https://medium.com/dogtronic/real-time-pose-detection-in-react-native-using-mlkit-e1819847c340 整个代码是为iOS编写的,因此我必须针对Android系统进行一些调整。我承认:我使用 ChatGPT 是因为我不熟悉 Java、Kotlin 等。
gradle 构建总是成功的。 首先是我的 package.json:
{
"name": "PoseDetection",
"displayName": "PoseDetection",
"version": "0.0.1",
"private": true,
"peerDependencies": {
"react-native-worklets-core": "1.3.3"
},
"scripts": {
"android": "react-native run-android",
"ios": "react-native run-ios",
"lint": "eslint .",
"start": "react-native start",
"test": "jest"
},
"dependencies": {
"metro-react-native-babel-preset": "^0.77.0",
"react": "18.2.0",
"react-native": "0.73.6",
"react-native-gesture-handler": "^2.14.0",
"react-native-reanimated": "^3.6.2",
"react-native-svg": "^15.3.0",
"react-native-vision-camera": "^4.0.5",
"react-native-worklets-core": "^1.3.3"
},
"devDependencies": {
"@babel/core": "^7.20.0",
"@babel/plugin-proposal-class-properties": "^7.18.6",
"@babel/plugin-transform-private-methods": "^7.24.7",
"@babel/preset-env": "^7.20.0",
"@babel/runtime": "^7.20.0",
"@react-native/babel-preset": "0.74.84",
"@react-native/eslint-config": "0.74.84",
"@react-native/metro-config": "0.74.84",
"@react-native/typescript-config": "0.74.84",
"@types/react": "^18.2.6",
"@types/react-test-renderer": "^18.0.0",
"babel-jest": "^29.6.3",
"eslint": "^8.19.0",
"jest": "^29.6.3",
"prettier": "2.8.8",
"react-test-renderer": "18.2.0",
"typescript": "5.0.4"
},
"engines": {
"node": ">=18"
}
}
以及 android app 中 build.gradle 的主要部分:
// Android app module configuration (module-level build.gradle).
android {
// NDK / build-tools / compile SDK versions are centralised in the root project's ext block.
ndkVersion rootProject.ext.ndkVersion
buildToolsVersion rootProject.ext.buildToolsVersion
compileSdk rootProject.ext.compileSdkVersion
namespace "com.posedetection"
defaultConfig {
applicationId "com.posedetection"
minSdkVersion rootProject.ext.minSdkVersion
targetSdkVersion rootProject.ext.targetSdkVersion
versionCode 1
versionName "1.0"
}
// Debug keystore checked into the repo; acceptable for local development only.
signingConfigs {
debug {
storeFile file('debug.keystore')
storePassword 'android'
keyAlias 'androiddebugkey'
keyPassword 'android'
}
}
buildTypes {
debug {
signingConfig signingConfigs.debug
}
release {
// Caution! In production, you need to generate your own keystore file.
// see https://reactnative.dev/docs/signed-apk-android.
signingConfig signingConfigs.debug
minifyEnabled enableProguardInReleaseBuilds
proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
}
}
}
dependencies {
    // React Native core. FIX: on RN >= 0.71 the Android artifact is
    // "com.facebook.react:react-android"; the old "react-native:+" coordinate
    // resolves to a stale 0.20.x artifact on Maven Central and produces
    // duplicate-class conflicts, so it must NOT be declared alongside it.
    implementation("com.facebook.react:react-android")
    implementation("com.facebook.soloader:soloader:0.10.3")

    // ML Kit pose detection (base + accurate models) and its vision plumbing.
    implementation 'com.google.mlkit:pose-detection:18.0.0-beta4'
    implementation 'com.google.mlkit:pose-detection-accurate:18.0.0-beta4'
    implementation 'com.google.mlkit:vision-common:17.0.0'
    implementation 'com.google.mlkit:vision-interfaces:16.0.0'
    implementation 'com.google.mlkit:camera:16.0.0-beta3'

    implementation 'androidx.annotation:annotation:1.3.0'
    implementation 'com.google.android.gms:play-services-tasks:18.0.0'

    // FIX: the legacy "com.android.support:multidex" artifact belongs to the
    // pre-AndroidX support library and clashes with an AndroidX project;
    // use the AndroidX replacement instead.
    implementation 'androidx.multidex:multidex:2.0.1'

    // Manually linked native modules (RN autolinking would normally add these).
    implementation project(':react-native-gesture-handler')
    implementation project(':react-native-vision-camera')
    implementation project(':react-native-svg')
    implementation project(':react-native-worklets-core')

    if (hermesEnabled.toBoolean()) {
        implementation("com.facebook.react:hermes-android")
    } else {
        implementation jscFlavor
    }
}
就像我说的,我大部分时间都使用 ChatGPT。它告诉我创建 2 个文件: PoseDetectionModule.java:
/**
 * React Native native module wrapping Google ML Kit's PoseDetector.
 * Exposed to JS as {@code NativeModules.PoseDetection} (see {@link #getName()}).
 */
public class PoseDetectionModule extends ReactContextBaseJavaModule {
// Single ML Kit detector instance, configured once for streaming video input.
private final PoseDetector poseDetector;
public PoseDetectionModule(ReactApplicationContext reactContext) {
super(reactContext);
// STREAM_MODE optimises latency for consecutive frames (vs. single still images).
PoseDetectorOptions options = new PoseDetectorOptions.Builder()
.setDetectorMode(PoseDetectorOptions.STREAM_MODE)
.build();
poseDetector = PoseDetection.getClient(options);
}
@Override
public String getName() {
// Name under which JS accesses this module: NativeModules.PoseDetection.
return "PoseDetection";
}
/**
 * Runs ML Kit pose detection on a bitmap and resolves the promise with a map of
 * landmark-name -> {x, y, z} using the landmark's 3D position.
 *
 * NOTE(review): @ReactMethod arguments must be bridge-serializable types
 * (boolean/number/String/ReadableMap/ReadableArray/Callback/Promise). An
 * android.graphics.Bitmap cannot be passed from JS over the bridge, and a
 * VisionCamera frame-processor worklet cannot call bridge modules at all — so
 * as written this method is effectively unreachable from the App.tsx frame
 * processor. A VisionCamera Frame Processor Plugin is the usual mechanism;
 * confirm against the VisionCamera documentation.
 */
@ReactMethod
public void detectPose(Bitmap bitmap, Promise promise) {
// rotationDegrees = 0: assumes the bitmap is already upright — TODO confirm
// this holds for the camera orientation actually used.
InputImage image = InputImage.fromBitmap(bitmap, 0);
poseDetector.process(image)
.addOnSuccessListener(
pose -> {
// One entry per detected landmark, keyed by its UPPER_SNAKE_CASE name
// (e.g. "LEFT_SHOULDER") — note the JS overlay uses camelCase keys.
WritableMap poseData = Arguments.createMap();
List<PoseLandmark> landmarks = pose.getAllPoseLandmarks();
for (PoseLandmark landmark : landmarks) {
String landmarkName = getLandmarkTypeString(landmark.getLandmarkType());
WritableMap position = Arguments.createMap();
PointF3D position3D = landmark.getPosition3D();
position.putDouble("x", position3D.getX());
position.putDouble("y", position3D.getY());
position.putDouble("z", position3D.getZ());
poseData.putMap(landmarkName, position);
}
promise.resolve(poseData);
})
.addOnFailureListener(
e -> {
promise.reject("Pose Detection Error", e);
});
}
/** Maps an ML Kit {@code PoseLandmark} integer type constant to its symbolic name. */
private String getLandmarkTypeString(int landmarkType) {
switch (landmarkType) {
case PoseLandmark.NOSE:
return "NOSE";
case PoseLandmark.LEFT_EYE_INNER:
return "LEFT_EYE_INNER";
case PoseLandmark.LEFT_EYE:
return "LEFT_EYE";
case PoseLandmark.LEFT_EYE_OUTER:
return "LEFT_EYE_OUTER";
case PoseLandmark.RIGHT_EYE_INNER:
return "RIGHT_EYE_INNER";
case PoseLandmark.RIGHT_EYE:
return "RIGHT_EYE";
case PoseLandmark.RIGHT_EYE_OUTER:
return "RIGHT_EYE_OUTER";
case PoseLandmark.LEFT_EAR:
return "LEFT_EAR";
case PoseLandmark.RIGHT_EAR:
return "RIGHT_EAR";
case PoseLandmark.LEFT_MOUTH:
return "LEFT_MOUTH";
case PoseLandmark.RIGHT_MOUTH:
return "RIGHT_MOUTH";
case PoseLandmark.LEFT_SHOULDER:
return "LEFT_SHOULDER";
case PoseLandmark.RIGHT_SHOULDER:
return "RIGHT_SHOULDER";
case PoseLandmark.LEFT_ELBOW:
return "LEFT_ELBOW";
case PoseLandmark.RIGHT_ELBOW:
return "RIGHT_ELBOW";
case PoseLandmark.LEFT_WRIST:
return "LEFT_WRIST";
case PoseLandmark.RIGHT_WRIST:
return "RIGHT_WRIST";
case PoseLandmark.LEFT_PINKY:
return "LEFT_PINKY";
case PoseLandmark.RIGHT_PINKY:
return "RIGHT_PINKY";
case PoseLandmark.LEFT_INDEX:
return "LEFT_INDEX";
case PoseLandmark.RIGHT_INDEX:
return "RIGHT_INDEX";
case PoseLandmark.LEFT_THUMB:
return "LEFT_THUMB";
case PoseLandmark.RIGHT_THUMB:
return "RIGHT_THUMB";
case PoseLandmark.LEFT_HIP:
return "LEFT_HIP";
case PoseLandmark.RIGHT_HIP:
return "RIGHT_HIP";
case PoseLandmark.LEFT_KNEE:
return "LEFT_KNEE";
case PoseLandmark.RIGHT_KNEE:
return "RIGHT_KNEE";
case PoseLandmark.LEFT_ANKLE:
return "LEFT_ANKLE";
case PoseLandmark.RIGHT_ANKLE:
return "RIGHT_ANKLE";
case PoseLandmark.LEFT_HEEL:
return "LEFT_HEEL";
case PoseLandmark.RIGHT_HEEL:
return "RIGHT_HEEL";
case PoseLandmark.LEFT_FOOT_INDEX:
return "LEFT_FOOT_INDEX";
case PoseLandmark.RIGHT_FOOT_INDEX:
return "RIGHT_FOOT_INDEX";
default:
// Unknown/future landmark types fall through here.
return "UNKNOWN";
}
}
}
PoseDetectionPackage.java:
/**
 * React Native package registering the PoseDetection native module.
 * Contributes no view managers.
 */
public class PoseDetectionPackage implements ReactPackage {
    @Override
    public List<NativeModule> createNativeModules(
            ReactApplicationContext reactContext) {
        final List<NativeModule> nativeModules = new ArrayList<>();
        nativeModules.add(new PoseDetectionModule(reactContext));
        return nativeModules;
    }

    @Override
    public List<ViewManager> createViewManagers(ReactApplicationContext reactContext) {
        // This package exposes only a module, no custom native views.
        return Collections.emptyList();
    }
}
我返回了我在 MainApplication.kt 中创建的包:
package com.posedetection
import android.app.Application
import com.facebook.react.ReactApplication
import com.facebook.react.ReactNativeHost
import com.facebook.react.ReactPackage
import com.facebook.react.shell.MainReactPackage
import com.facebook.soloader.SoLoader
import com.swmansion.gesturehandler.RNGestureHandlerPackage
import com.swmansion.reanimated.ReanimatedPackage
import com.posedetection.PoseDetectionPackage
import java.util.Arrays
import com.mrousavy.camera.react.CameraPackage;
import com.horcrux.svg.SvgPackage
import com.worklets.WorkletsPackage
class MainApplication : Application(), ReactApplication {

    // Host wiring the JS bundle and the native packages exposed to it.
    private val mReactNativeHost: ReactNativeHost = object : ReactNativeHost(this) {
        override fun getUseDeveloperSupport(): Boolean {
            return BuildConfig.DEBUG
        }

        override fun getPackages(): List<ReactPackage> {
            // Manual registration (no autolinked PackageList in this project).
            return listOf(
                MainReactPackage(),
                PoseDetectionPackage(), // custom ML Kit pose-detection module
                RNGestureHandlerPackage(),
                ReanimatedPackage(),
                CameraPackage(),
                SvgPackage(),
                WorkletsPackage()
            )
        }

        override fun getJSMainModuleName(): String {
            return "index"
        }
    }

    override val reactNativeHost: ReactNativeHost
        get() = mReactNativeHost

    // FIX: the original line read "o }verride fun onCreate() {" — a stray "}"
    // split the "override" keyword and cannot compile as posted.
    override fun onCreate() {
        super.onCreate()
        SoLoader.init(this, /* native exopackage */ false)
        // for using flipper just in case
    }
}
然后我的项目文件夹中有我的 App.tsx:
import React, { useEffect } from 'react';
import { Dimensions, StyleSheet, Text, useWindowDimensions } from 'react-native';
import { Camera, useFrameProcessor, useCameraDevice, VisionCameraProxy } from 'react-native-vision-camera';
import Animated, { useAnimatedProps } from 'react-native-reanimated';
import Svg, { Line } from 'react-native-svg';
import { useSharedValue } from 'react-native-worklets-core';
import { NativeModules } from 'react-native';
const { PoseDetection } = NativeModules;
const AnimatedLine = Animated.createAnimatedComponent(Line);
export function __poseDetection(frame: any): any {
'worklet';
return new Promise((resolve) => {
PoseDetection.detectPose(frame, (result: any) => {
resolve(result);
});
});
}
/**
 * Builds animated SVG <Line> props connecting two named landmarks of `pose`.
 * A landmark that is absent from the shared value collapses to (0, 0), which
 * keeps the line parked at the origin instead of crashing.
 */
const usePosition = (pose: any, valueName1: string, valueName2: string) => {
  return useAnimatedProps(() => {
    const first = pose.value[valueName1];
    const second = pose.value[valueName2];
    return {
      x1: first?.x ?? 0,
      y1: first?.y ?? 0,
      x2: second?.x ?? 0,
      y2: second?.y ?? 0,
    };
  }, [pose]);
};
// Every landmark drawn by the skeleton overlay, initialised at the origin so
// all lines start hidden in the top-left corner until detection kicks in.
const LANDMARK_KEYS = [
  'leftShoulder',
  'rightShoulder',
  'leftElbow',
  'rightElbow',
  'leftWrist',
  'rightWrist',
  'leftHip',
  'rightHip',
  'leftKnee',
  'rightKnee',
  'leftAnkle',
  'rightAnkle',
];
const defaultPose = Object.fromEntries(
  LANDMARK_KEYS.map((key) => [key, { x: 0, y: 0 }]),
);
/**
 * Camera preview with a skeleton overlay: runs pose detection per frame in a
 * worklet, stores scaled landmark positions in a shared value, and draws the
 * limb segments as animated SVG lines on top of the camera.
 */
const App = () => {
  // Latest detected pose, shared with the UI thread so the overlay updates
  // without a React re-render on every frame.
  const pose = useSharedValue(defaultPose);

  // One animated line per limb segment of the skeleton.
  const leftWristToElbowPosition = usePosition(pose, 'leftWrist', 'leftElbow');
  const leftElbowToShoulderPosition = usePosition(pose, 'leftElbow', 'leftShoulder');
  const leftShoulderToHipPosition = usePosition(pose, 'leftShoulder', 'leftHip');
  const leftHipToKneePosition = usePosition(pose, 'leftHip', 'leftKnee');
  const leftKneeToAnklePosition = usePosition(pose, 'leftKnee', 'leftAnkle');
  const rightWristToElbowPosition = usePosition(pose, 'rightWrist', 'rightElbow');
  const rightElbowToShoulderPosition = usePosition(pose, 'rightElbow', 'rightShoulder');
  const rightShoulderToHipPosition = usePosition(pose, 'rightShoulder', 'rightHip');
  const rightHipToKneePosition = usePosition(pose, 'rightHip', 'rightKnee');
  const rightKneeToAnklePosition = usePosition(pose, 'rightKnee', 'rightAnkle');
  const shoulderToShoulderPosition = usePosition(pose, 'leftShoulder', 'rightShoulder');
  const hipToHipPosition = usePosition(pose, 'leftHip', 'rightHip');

  const dimensions = useWindowDimensions();

  const frameProcessor = useFrameProcessor(
    (frame) => {
      'worklet';
      const detected = __poseDetection(frame);
      // Guard: keep the previous pose if detection returned nothing.
      if (detected == null) {
        return;
      }
      // Scale from camera-frame pixels to screen points.
      const xFactor = dimensions.width / frame.width;
      const yFactor = dimensions.height / frame.height;
      const nextPose: any = { ...defaultPose };
      Object.keys(detected).forEach((key) => {
        // FIX: the native module names landmarks in UPPER_SNAKE_CASE (e.g.
        // "LEFT_SHOULDER") while the overlay keys are camelCase
        // ("leftShoulder"), so the lines never moved. Normalise snake-case
        // keys; keys without "_" pass through unchanged.
        const camelKey = key.includes('_')
          ? key.toLowerCase().replace(/_([a-z])/g, (_m, c) => c.toUpperCase())
          : key;
        nextPose[camelKey] = {
          x: detected[key].x * xFactor,
          y: detected[key].y * yFactor,
        };
      });
      pose.value = nextPose;
    },
    // FIX: `dimensions` was captured with an empty dependency list, so the
    // worklet kept stale window dimensions after rotation/resize.
    [dimensions],
  );

  const device = useCameraDevice('front');

  useEffect(() => {
    const checkPermissions = async () => {
      await Camera.requestCameraPermission();
    };
    checkPermissions();
  }, []);

  if (device == null) {
    return <Text>Loading...</Text>;
  }

  return (
    <>
      <Camera
        frameProcessor={frameProcessor}
        style={StyleSheet.absoluteFill}
        device={device}
        isActive={true}
      />
      <Svg
        height={Dimensions.get('window').height}
        width={Dimensions.get('window').width}
        style={styles.linesContainer}>
        <AnimatedLine animatedProps={leftWristToElbowPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={leftElbowToShoulderPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={leftShoulderToHipPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={leftHipToKneePosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={leftKneeToAnklePosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={rightWristToElbowPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={rightElbowToShoulderPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={rightShoulderToHipPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={rightHipToKneePosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={rightKneeToAnklePosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={shoulderToShoulderPosition} stroke="red" strokeWidth="2" />
        <AnimatedLine animatedProps={hipToHipPosition} stroke="red" strokeWidth="2" />
      </Svg>
    </>
  );
};
// Full-screen transparent layer hosting the skeleton lines above the camera.
const { width: windowWidth, height: windowHeight } = Dimensions.get('window');
const styles = StyleSheet.create({
  linesContainer: {
    position: 'absolute',
    top: 0,
    left: 0,
    width: windowWidth,
    height: windowHeight,
  },
});
export default App;
我知道代码很多,但我认为它们对于我的问题是必要的。我收到以下错误:`Property '__poseDetection' doesn't exist`。
为什么说 __poseDetection 不存在?我无法判断是我的姿势检测根本不起作用、函数写错了,还是只是导入了错误的内容。
我尝试以不同的方式定义'__poseDetection',看看是否存在逻辑问题,但无论我做什么,错误仍然出现
我按照React Native的指示检查了App.tsx中PoseDetectionModule的导入,似乎没问题
我通过添加调试命令进行了尝试,但没有得到任何有用的东西,甚至什么也没有
我可能也会在 GitHub 上创建一个帖子,因为这个问题应该是我大学的一个项目,所以它非常重要
对于 RN 的错误,我看到提到的属性 __poseDetection 不存在。 所以你可以尝试从你的函数中删除__,只需poseDetection就足够了,我记得,有时JS使用前缀__来处理一些特殊变量。
如果仍然不起作用,请尝试记录 __poseDetection 并查看它(函数或其他东西)是什么。
还有一点很奇怪:你在这个函数里声明了 'worklet',但它似乎并没有带来任何明显的作用——可以确认一下这个声明是否必要。