我正在 Linux 操作系统上使用 react-native-cli 版本 0.74.0 编写代码,并用 Android 设备进行调试。
我正在尝试使用本机模块制作相机应用程序。 你会问如果已经有了相机包那我为什么要这样做呢? 我有一些定制,包括面部识别。
下面是我实现的代码。需要说明的是,在我加入 cameraType 属性(prop)之前,相机是可以正常工作的。
MainActivity.kt
package com.real_time_face_detection
import android.os.Bundle
import android.content.pm.PackageManager
import android.widget.FrameLayout
import androidx.activity.result.contract.ActivityResultContracts
import androidx.core.content.ContextCompat
import com.facebook.react.ReactActivity
import com.facebook.react.ReactActivityDelegate
import com.facebook.react.defaults.DefaultReactActivityDelegate
import android.util.Log
import android.widget.Toast
// Hosts the React Native root view. Independently of the JS side, it also
// requests the CAMERA runtime permission on startup and, once granted,
// overlays a native CameraView on top of the activity's content.
class MainActivity : ReactActivity() {
// Full-screen native preview; created lazily once the permission is granted.
private var cameraView: CameraView? = null
// Must be registered before the activity reaches STARTED, which is why it is
// initialized as a field (during construction) rather than inside onCreate.
private val requestPermissionLauncher = registerForActivityResult(ActivityResultContracts.RequestPermission()) { isGranted: Boolean ->
if (isGranted) {
Log.d("FaceRecognition", "Camera permission granted log from MainActivity.kt")
showToast("Camera permission granted")
initializeCameraView()
} else {
showToast("Camera permission denied")
Log.d("FaceRecognition", "Camera permission denied log from MainActivity.kt")
}
}
// Short user-visible status message; used only for permission feedback.
private fun showToast(message: String) {
Toast.makeText(this, message, Toast.LENGTH_SHORT).show()
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
// Check-then-request flow: initialize immediately when already granted,
// otherwise ask and continue in the launcher callback above.
if (ContextCompat.checkSelfPermission(this, android.Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED) {
Log.d("FaceRecognition", "Camera permission already granted log from MainActivity.kt")
initializeCameraView()
Log.d("FaceRecognition", "initialized Camera ---- log from MainActivity.kt")
} else {
Log.d("FaceRecognition", "Requesting camera permission log from MainActivity.kt")
requestPermissionLauncher.launch(android.Manifest.permission.CAMERA)
Log.d("FaceRecognition", "Requesting camera permission done ------- log from MainActivity.kt")
}
}
// Creates the native CameraView once and attaches it over the whole window.
// NOTE(review): addContentView with MATCH_PARENT puts this view on top of the
// React root view, so it can cover the JS-rendered <CameraView>; also,
// startCamera() is never called on this instance, so this overlay by itself
// shows no preview — confirm this native overlay is wanted in addition to the
// React-managed view created by CameraViewManager.
private fun initializeCameraView() {
if (cameraView == null) {
cameraView = CameraView(this, this) // Pass both context and lifecycle owner
Log.d("FaceRecognition", "Initializing CameraView log from MainActivity.kt")
addContentView(cameraView, FrameLayout.LayoutParams(FrameLayout.LayoutParams.MATCH_PARENT, FrameLayout.LayoutParams.MATCH_PARENT))
Log.d("FaceRecognition", "Initializing CameraView done---- log from MainActivity.kt")
}
}
// Name of the JS component registered via AppRegistry on the JS side.
override fun getMainComponentName(): String = "real_time_face_detection"
/**
* Returns the instance of the [ReactActivityDelegate]. We use [DefaultReactActivityDelegate]
* which allows you to enable New Architecture with a single boolean flags [fabricEnabled]
*/
override fun createReactActivityDelegate(): ReactActivityDelegate =
DefaultReactActivityDelegate(this, mainComponentName, fabricEnabled)
private val fabricEnabled: Boolean
get() = BuildConfig.IS_NEW_ARCHITECTURE_ENABLED
}
FaceDetectionPackage.kt
package com.real_time_face_detection
import com.facebook.react.ReactPackage
import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager
/**
 * Registers the project's custom native pieces with React Native:
 * the [FaceDetectionModule] bridge module and the native <CameraView>
 * component managed by [CameraViewManager].
 */
class FaceDetectionPackage : ReactPackage {

    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(FaceDetectionModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        listOf(CameraViewManager())
}
FaceDetectionModule.kt
package com.real_time_face_detection
import android.util.Log
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import com.facebook.react.module.annotations.ReactModule
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.Face
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
/**
 * Bridge module wrapping ML Kit's on-device face detector.
 *
 * It is not called from JS directly; CameraViewManager hands it to
 * CameraView, which feeds it analysis frames.
 */
@ReactModule(name = FaceDetectionModule.NAME)
class FaceDetectionModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {

    // FAST mode trades landmark accuracy for throughput, which suits a live
    // camera stream.
    private val detector = FaceDetection.getClient(
        FaceDetectorOptions.Builder()
            .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_FAST)
            .build()
    )

    override fun getName(): String = NAME

    /**
     * Runs face detection on [image] and reports the result through
     * [callback]. On failure the callback receives an empty list, so the
     * caller is invoked exactly once per frame on every path.
     */
    fun processImage(image: InputImage, callback: (List<Face>) -> Unit) {
        detector.process(image)
            .addOnSuccessListener { faces ->
                Log.d("FaceRecognition", "Faces detected: ${faces.size} log from FaceDetectionModule.kt")
                callback(faces)
            }
            .addOnFailureListener { e ->
                Log.e("FaceRecognition", "Face detection failed log from FaceDetectionModule.kt", e)
                callback(emptyList())
            }
    }

    // FIX: release the native ML Kit detector when the React instance is torn
    // down (e.g. on a dev-menu reload); the original never closed it, leaking
    // the native detector across reloads.
    override fun invalidate() {
        super.invalidate()
        detector.close()
    }

    companion object {
        const val NAME = "FaceDetection"
    }
}
CameraViewManager.kt
package com.real_time_face_detection
import com.facebook.react.uimanager.SimpleViewManager
import com.facebook.react.uimanager.ThemedReactContext
import com.facebook.react.uimanager.annotations.ReactProp
import android.util.Log
import androidx.lifecycle.LifecycleOwner
/**
 * View manager backing the JS component obtained via
 * requireNativeComponent('CameraView'). Creates the native CameraView, wires
 * in the face-detection module, and forwards the `cameraType` prop.
 */
class CameraViewManager : SimpleViewManager<CameraView>() {

    override fun getName(): String = "CameraView"

    override fun createViewInstance(reactContext: ThemedReactContext): CameraView {
        Log.d("FaceRecognition", "create view instance, log from CameraViewManager.kt")
        // The hosting activity doubles as the LifecycleOwner CameraX binds to.
        val owner = reactContext.currentActivity as? LifecycleOwner
            ?: throw IllegalStateException("Context is not a LifecycleOwner")
        return CameraView(reactContext, lifecycleOwner = owner).also { view ->
            reactContext.getNativeModule(FaceDetectionModule::class.java)
                ?.let(view::setFaceDetectionModule)
            Log.d("FaceRecognition", "faceDetectionModule set, log from CameraViewManager.kt")
        }
    }

    /** Prop setter: React calls this whenever `cameraType` is set/changed. */
    @ReactProp(name = "cameraType")
    fun setCameraType(view: CameraView, cameraType: String) {
        Log.d("FaceRecognition", "Camera type from manager view - $cameraType log from CameraViewManager.kt")
        view.startCamera(cameraType)
    }
}
MainApplication.kt
package com.real_time_face_detection
import android.app.Application
import com.facebook.react.PackageList
import com.facebook.react.ReactApplication
import com.facebook.react.ReactHost
import com.facebook.react.ReactNativeHost
import com.facebook.react.ReactPackage
import com.facebook.react.defaults.DefaultNewArchitectureEntryPoint.load
import com.facebook.react.defaults.DefaultReactHost.getDefaultReactHost
import com.facebook.react.defaults.DefaultReactNativeHost
import com.facebook.soloader.SoLoader
// Standard React Native 0.74 application template, extended only to register
// FaceDetectionPackage manually (it is not autolinked).
class MainApplication : Application(), ReactApplication {
override val reactNativeHost: ReactNativeHost =
object : DefaultReactNativeHost(this) {
override fun getPackages(): List<ReactPackage> =
PackageList(this).packages.apply {
// Packages that cannot be autolinked yet can be added manually here, for example:
// add(MyReactNativePackage())
add(FaceDetectionPackage()) // <-- Add this line
}
override fun getJSMainModuleName(): String = "index"
override fun getUseDeveloperSupport(): Boolean = BuildConfig.DEBUG
override val isNewArchEnabled: Boolean get() = BuildConfig.IS_NEW_ARCHITECTURE_ENABLED
override val isHermesEnabled: Boolean get() = BuildConfig.IS_HERMES_ENABLED
}
// New-architecture host derived from the legacy host configuration above.
override val reactHost: ReactHost
get() = getDefaultReactHost(applicationContext, reactNativeHost)
override fun onCreate() {
super.onCreate()
SoLoader.init(this, false)
if (BuildConfig.IS_NEW_ARCHITECTURE_ENABLED) {
// If you opted-in for the New Architecture, we load the native entry point for this app.
load()
}
}
}
CameraView.kt
package com.real_time_face_detection
import android.content.Context
import android.graphics.Canvas
import android.graphics.Paint
import android.graphics.Rect
import android.os.Handler
import android.os.Looper
import android.util.AttributeSet
import android.util.Log
import android.view.View
import android.widget.FrameLayout
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.view.PreviewView
import androidx.core.content.ContextCompat
import androidx.lifecycle.Lifecycle
import androidx.lifecycle.LifecycleEventObserver
import androidx.lifecycle.LifecycleOwner
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.Face
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
/**
 * Native camera preview with a face-detection overlay.
 *
 * Hosts a CameraX [PreviewView] plus a transparent [FaceOverlay] that draws a
 * rectangle around every face reported by [FaceDetectionModule]. Driven either
 * directly from MainActivity or through React Native via CameraViewManager,
 * which calls [startCamera] when the `cameraType` prop is set.
 */
class CameraView @JvmOverloads constructor(
    context: Context,
    private val lifecycleOwner: LifecycleOwner,
    attrs: AttributeSet? = null,
    defStyleAttr: Int = 0
) : FrameLayout(context, attrs, defStyleAttr) {

    private var cameraProvider: ProcessCameraProvider? = null
    // Single-threaded executor for ImageAnalysis; frames are analyzed one at a time.
    private val cameraExecutor: ExecutorService = Executors.newSingleThreadExecutor()
    private val previewView: PreviewView = PreviewView(context)
    private val faceOverlay: FaceOverlay = FaceOverlay(context)
    private var faceDetectionModule: FaceDetectionModule? = null

    init {
        Log.d("FaceRecognition", "Initializing CameraView log from CameraView.kt")
        setupLayout()
        logLifecycleState()
    }

    // FIX (black screen under React Native): React Native computes layout in
    // its own shadow tree and does not run a normal Android measure/layout
    // pass for children a native ViewGroup adds itself, so previewView can
    // stay at 0x0 and the camera surface never appears even though binding
    // succeeds. Re-running measure()/layout() after every requestLayout() is
    // the standard workaround for custom Android views hosted by RN.
    private val measureAndLayout = Runnable {
        measure(
            MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY),
            MeasureSpec.makeMeasureSpec(height, MeasureSpec.EXACTLY)
        )
        layout(left, top, right, bottom)
    }

    override fun requestLayout() {
        super.requestLayout()
        post(measureAndLayout)
    }

    /** Adds the preview and the overlay, both filling this FrameLayout. */
    private fun setupLayout() {
        Log.d("FaceRecognition", "Setting up layout log from CameraView.kt")
        previewView.layoutParams = LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT)
        faceOverlay.layoutParams = LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT)
        addView(previewView)
        addView(faceOverlay)
        Log.d("FaceRecognition", "Layout setup completed log from CameraView.kt")
    }

    fun setFaceDetectionModule(module: FaceDetectionModule) {
        faceDetectionModule = module
        Log.d("FaceRecognition", "FaceDetectionModule set log from CameraView.kt")
    }

    // Debug aid: logs every lifecycle transition of the owning activity.
    private fun logLifecycleState() {
        lifecycleOwner.lifecycle.addObserver(LifecycleEventObserver { _, event ->
            Log.d("FaceRecognition", "Lifecycle event: $event log from CameraView.kt")
        })
    }

    /**
     * Starts (or restarts) the camera. [cameraType] selects the lens:
     * "front" uses the front camera; anything else falls back to the back one.
     */
    fun startCamera(cameraType: String) {
        Log.d("FaceRecognition", "Starting camera with type: $cameraType log from CameraView.kt")
        val cameraSelector = if (cameraType == "front") {
            CameraSelector.DEFAULT_FRONT_CAMERA
        } else {
            CameraSelector.DEFAULT_BACK_CAMERA
        }
        val cameraProviderFuture = ProcessCameraProvider.getInstance(context)
        cameraProviderFuture.addListener({
            cameraProvider = cameraProviderFuture.get()
            Log.d("FaceRecognition", "CameraProvider obtained log from CameraView.kt")
            bindCameraUseCases(cameraSelector)
        }, ContextCompat.getMainExecutor(context))
    }

    /** Binds preview + analysis to the lifecycle, replacing any prior binding. */
    private fun bindCameraUseCases(cameraSelector: CameraSelector) {
        val provider = cameraProvider ?: return
        val lifecycleState = lifecycleOwner.lifecycle.currentState
        Log.d("FaceRecognition", "Lifecycle state during bind: $lifecycleState log from CameraView.kt")
        if (!lifecycleState.isAtLeast(Lifecycle.State.STARTED)) {
            Log.e("FaceRecognition", "Lifecycle state not suitable for binding camera use cases log from CameraView.kt")
            return
        }
        val preview = Preview.Builder().build()
        // The surface provider must be attached on the main thread.
        Handler(Looper.getMainLooper()).post {
            preview.setSurfaceProvider(previewView.surfaceProvider)
            Log.d("FaceRecognition", "SurfaceProvider set for preview log from CameraView.kt")
        }
        val imageAnalyzer = ImageAnalysis.Builder().build().also {
            it.setAnalyzer(cameraExecutor) { imageProxy -> processImageProxy(imageProxy) }
        }
        try {
            // Rebinding from scratch lets startCamera() be called repeatedly
            // to switch lenses.
            provider.unbindAll()
            Log.d("FaceRecognition", "All use cases unbound log from CameraView.kt")
            provider.bindToLifecycle(lifecycleOwner, cameraSelector, preview, imageAnalyzer)
            Log.d("FaceRecognition", "Camera use cases bound log from CameraView.kt")
        } catch (exc: Exception) {
            Log.e("FaceRecognition", "Use case binding failed log from CameraView.kt", exc)
        }
    }

    private fun processImageProxy(imageProxy: ImageProxy) {
        val mediaImage = imageProxy.image
        if (mediaImage == null) {
            // FIX: the proxy must be closed on *every* path. The original
            // returned without closing here, which stalls the ImageAnalysis
            // pipeline once the in-flight frame quota is exhausted.
            imageProxy.close()
            return
        }
        val module = faceDetectionModule
        if (module == null) {
            imageProxy.close()
            return
        }
        val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
        module.processImage(image) { faces ->
            faceOverlay.setFaces(faces)
            Log.d("FaceRecognition", "Faces detected: ${faces.size} log from CameraView.kt")
            // FIX: close only after ML Kit is done with the frame. The
            // original closed the proxy immediately after handing it to the
            // async detector, invalidating the image while it was still in
            // use. processImage invokes this callback on success and failure,
            // so the proxy is always released.
            imageProxy.close()
        }
    }

    /**
     * Unbinds all use cases. NOTE(review): this also shuts the analyzer
     * executor down, so the camera cannot be restarted on this instance
     * afterwards — confirm that is intended before calling mid-session.
     */
    fun stopCamera() {
        Log.d("FaceRecognition", "Stopping camera log from CameraView.kt")
        cameraProvider?.unbindAll()
        cameraExecutor.shutdown()
    }

    /** Transparent overlay that outlines detected faces in green. */
    private class FaceOverlay(context: Context) : View(context) {
        private var faces: List<Face> = emptyList()
        private val paint = Paint().apply {
            color = ContextCompat.getColor(context, android.R.color.holo_green_light)
            style = Paint.Style.STROKE
            strokeWidth = 8f
        }

        fun setFaces(faces: List<Face>) {
            this.faces = faces
            invalidate()
        }

        override fun onDraw(canvas: Canvas) {
            super.onDraw(canvas)
            // NOTE(review): boundingBox is in the analysis image's coordinate
            // space; drawing it unscaled only lines up when preview and
            // analysis resolutions match — a scale/mirror mapping is likely
            // needed, especially for the front camera. Verify on device.
            for (face in faces) {
                canvas.drawRect(face.boundingBox, paint)
            }
        }
    }
}
AndroidManifest.xml
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<uses-permission android:name="android.permission.INTERNET" />
<!-- Requested at runtime in both MainActivity and App.js -->
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<!-- NOTE(review): uses-feature defaults to android:required="true"; add
     android:required="false" if the app should also install on devices
     without a camera / autofocus. -->
<uses-feature android:name="android.hardware.camera" />
<uses-feature android:name="android.hardware.camera.autofocus" />
<application
android:name=".MainApplication"
android:label="@string/app_name"
android:icon="@mipmap/ic_launcher"
android:roundIcon="@mipmap/ic_launcher_round"
android:allowBackup="false"
android:theme="@style/AppTheme">
<activity
android:name=".MainActivity"
android:label="@string/app_name"
android:configChanges="keyboard|keyboardHidden|orientation|screenLayout|screenSize|smallestScreenSize|uiMode"
android:launchMode="singleTask"
android:windowSoftInputMode="adjustResize"
android:exported="true">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>
App.js —— React Native 端
import React, {useState, useEffect} from 'react';
import {View, Button, StyleSheet, PermissionsAndroid} from 'react-native';
import CameraViewWrapper from './CameraViewWrapper';
// Root screen: mounts the native camera once the CAMERA permission is granted
// and lets the user switch lenses / start / stop detection.
const App = () => {
// Whether the native CameraView is mounted at all.
const [cameraStarted, setCameraStarted] = useState(false);
// Lens selection, forwarded to the native `cameraType` prop ('back'|'front').
const [cameraType, setCameraType] = useState('back');
console.log(cameraType);
console.log(cameraStarted);
// Ask once on mount; on grant the camera view is mounted.
useEffect(() => {
requestCameraPermission();
}, []);
const requestCameraPermission = async () => {
console.log('ll');
try {
const granted = await PermissionsAndroid.request(
PermissionsAndroid.PERMISSIONS.CAMERA,
{
title: 'Camera Permission',
message: 'App needs camera permission',
buttonNeutral: 'Ask Me Later',
buttonNegative: 'Cancel',
buttonPositive: 'OK',
},
);
console.log(granted, ';;;;');
if (granted === PermissionsAndroid.RESULTS.GRANTED) {
console.log(granted, '=============');
console.log(PermissionsAndroid.RESULTS.GRANTED, '-------------');
setCameraStarted(true);
} else {
console.log('Camera permission denied');
}
} catch (err) {
console.warn(err);
}
};
// Re-runs the permission flow; on success the camera is (re)mounted.
const startDetection = () => {
requestCameraPermission();
};
// Unmounts the native camera view entirely.
const stopDetection = () => {
setCameraStarted(false);
};
const switchToBackCamera = () => {
setCameraType('back');
};
const switchToFrontCamera = () => {
setCameraType('front');
};
return (
<View style={styles.container}>
{cameraStarted && (
<CameraViewWrapper style={styles.cameraView} cameraType={cameraType} />
)}
<View style={styles.buttonContainer}>
<Button title="Back Camera" onPress={switchToBackCamera} />
<Button title="Front Camera" onPress={switchToFrontCamera} />
<Button title="Start Detection" onPress={startDetection} />
<Button title="Stop Detection" onPress={stopDetection} />
</View>
</View>
);
};
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'white',
},
// NOTE(review): CameraViewWrapper does not forward its `style` prop (it
// always renders the native view with flex:1), so these dimensions are
// currently ignored — verify which sizing is intended.
cameraView: {
flex: 1,
height: 200,
width: 200,
},
buttonContainer: {
flexDirection: 'row',
justifyContent: 'space-around',
padding: 10,
backgroundColor: 'rgba(0, 0, 0, 0.5)',
position: 'absolute',
bottom: 0,
width: '100%',
},
});
export default App;
CameraViewWrapper.js
import {requireNativeComponent} from 'react-native';
import PropTypes from 'prop-types';
import React from 'react';
const CameraView = requireNativeComponent('CameraView');
const CameraViewWrapper = ({cameraType}) => {
console.log('CameraType being passed ---: ', cameraType, CameraView); // Add this log
return <CameraView style={{flex: 1}} cameraType={cameraType} />;
};
CameraViewWrapper.propTypes = {
cameraType: PropTypes.string.isRequired,
};
export default CameraViewWrapper;
apply plugin: "com.android.application"
apply plugin: "org.jetbrains.kotlin.android"
apply plugin: "com.facebook.react"
/**
* This is the configuration block to customize your React Native Android app.
* By default you don't need to apply any configuration, just uncomment the lines you need.
*/
react {
/* Folders */
// The root of your project, i.e. where "package.json" lives. Default is '..'
// root = file("../")
// The folder where the react-native NPM package is. Default is ../node_modules/react-native
// reactNativeDir = file("../node_modules/react-native")
// The folder where the react-native Codegen package is. Default is ../node_modules/@react-native/codegen
// codegenDir = file("../node_modules/@react-native/codegen")
// The cli.js file which is the React Native CLI entrypoint. Default is ../node_modules/react-native/cli.js
// cliFile = file("../node_modules/react-native/cli.js")
/* Variants */
// The list of variants to that are debuggable. For those we're going to
// skip the bundling of the JS bundle and the assets. By default is just 'debug'.
// If you add flavors like lite, prod, etc. you'll have to list your debuggableVariants.
// debuggableVariants = ["liteDebug", "prodDebug"]
/* Bundling */
// A list containing the node command and its flags. Default is just 'node'.
// nodeExecutableAndArgs = ["node"]
//
// The command to run when bundling. By default is 'bundle'
// bundleCommand = "ram-bundle"
//
// The path to the CLI configuration file. Default is empty.
// bundleConfig = file(../rn-cli.config.js)
//
// The name of the generated asset file containing your JS bundle
// bundleAssetName = "MyApplication.android.bundle"
//
// The entry file for bundle generation. Default is 'index.android.js' or 'index.js'
// entryFile = file("../js/MyApplication.android.js")
//
// A list of extra flags to pass to the 'bundle' commands.
// See https://github.com/react-native-community/cli/blob/main/docs/commands.md#bundle
// extraPackagerArgs = []
/* Hermes Commands */
// The hermes compiler command to run. By default it is 'hermesc'
// hermesCommand = "$rootDir/my-custom-hermesc/bin/hermesc"
//
// The list of flags to pass to the Hermes compiler. By default is "-O", "-output-source-map"
// hermesFlags = ["-O", "-output-source-map"]
}
/**
* Set this to true to Run Proguard on Release builds to minify the Java bytecode.
*/
def enableProguardInReleaseBuilds = false
/**
* The preferred build flavor of JavaScriptCore (JSC)
*
* For example, to use the international variant, you can use:
* `def jscFlavor = 'org.webkit:android-jsc-intl:+'`
*
* The international variant includes ICU i18n library and necessary data
* allowing to use e.g. `Date.toLocaleString` and `String.localeCompare` that
* give correct results when using with locales other than en-US. Note that
* this variant is about 6MiB larger per architecture than default.
*/
def jscFlavor = 'org.webkit:android-jsc:+'
android {
ndkVersion rootProject.ext.ndkVersion
buildToolsVersion rootProject.ext.buildToolsVersion
compileSdk rootProject.ext.compileSdkVersion
namespace "com.real_time_face_detection"
defaultConfig {
applicationId "com.real_time_face_detection"
// NOTE(review): CameraX requires minSdkVersion >= 21 — confirm
// rootProject.ext.minSdkVersion satisfies this.
minSdkVersion rootProject.ext.minSdkVersion
targetSdkVersion rootProject.ext.targetSdkVersion
versionCode 1
versionName "1.0"
}
// Debug keystore from the RN template; for development builds only.
signingConfigs {
debug {
storeFile file('debug.keystore')
storePassword 'android'
keyAlias 'androiddebugkey'
keyPassword 'android'
}
}
buildTypes {
debug {
signingConfig signingConfigs.debug
}
release {
// Caution! In production, you need to generate your own keystore file.
// see https://reactnative.dev/docs/signed-apk-android.
signingConfig signingConfigs.debug
minifyEnabled enableProguardInReleaseBuilds
proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
}
}
}
dependencies {
// The version of react-native is set by the React Native Gradle Plugin
implementation("com.facebook.react:react-android")
// CameraX stack — keep all camera-* artifacts on the same version.
implementation "androidx.camera:camera-core:1.2.1"
implementation "androidx.camera:camera-camera2:1.2.1"
implementation "androidx.camera:camera-lifecycle:1.2.1"
implementation "androidx.camera:camera-view:1.2.1"
implementation "androidx.camera:camera-extensions:1.2.1"
// ML Kit on-device face detection (model bundled with the app).
implementation "com.google.mlkit:face-detection:16.1.5"
// JS engine: Hermes when enabled in gradle.properties, otherwise JSC.
if (hermesEnabled.toBoolean()) {
implementation("com.facebook.react:hermes-android")
} else {
implementation jscFlavor
}
}
// Wires in autolinked native modules discovered by the React Native CLI.
apply from: file("../../node_modules/@react-native-community/cli-platform-android/native_modules.gradle"); applyNativeModulesAppBuildGradle(project)
我已经尝试了所有方法,请让我知道我做错了什么以及我应该更新哪些内容才能正常工作。
以前,当我不使用 cameraType 属性时它可以正常工作;但当我添加 cameraType 属性后,它就停止工作并开始显示黑屏。即使我重启了 React Native 并多次重新构建,结果仍然相同。
我有一个类似的项目,在那个项目里,我通过重新设置原生相机容器的样式来强制它重新渲染,代码见 CameraView3.js:
import {
StyleSheet,
Text,
View,
NativeModules,
NativeEventEmitter,
Button,
TouchableOpacity,
requireNativeComponent,
PermissionsAndroid,
} from "react-native";
import React, { useState, useEffect, useRef } from "react";
import * as ImagePicker from "expo-image-picker";
// Native component from the sibling project, registered as "CameraView3"
// by its own Android view manager.
const CameraX = requireNativeComponent("CameraView3");
// Reference screen from a similar project: the camera sometimes rendered
// black until a style change forced React Native to re-send layout to the
// native view (see triggerLayoutUpdate / toggleCameraStyle below).
const CameraView3 = () => {
const [imageUri, setImageUri] = useState(null);
const [useLargeStyle, setUseLargeStyle] = useState(false); // Add this state
const cameraRef = useRef(null);
// Ask for the camera permission once on mount.
useEffect(() => {
requestCameraPermission();
}, []);
const requestCameraPermission = async () => {
try {
const granted = await PermissionsAndroid.request(
PermissionsAndroid.PERMISSIONS.CAMERA,
{
title: "Camera Permission",
message: "App needs camera permission",
buttonNeutral: "Ask Me Later",
buttonNegative: "Cancel",
buttonPositive: "OK",
}
);
console.log(granted, ";;;;");
if (granted === PermissionsAndroid.RESULTS.GRANTED) {
console.log("XD");
} else {
console.log("Camera permission denied");
}
} catch (err) {
console.warn(err);
}
};
// Picks an image from the gallery and hands its URI to the native view.
const pickImage = async () => {
let result = await ImagePicker.launchImageLibraryAsync({
mediaTypes: ImagePicker.MediaTypeOptions.Images,
aspect: [4, 3],
quality: 1,
});
if (!result.canceled) {
setImageUri(result.assets[0].uri);
console.log("get image as", result.assets[0].uri);
triggerLayoutUpdate();
}
};
// Workaround: nudging opacity makes React Native re-send layout to the
// native view, which forces the camera surface to redraw.
const triggerLayoutUpdate = () => {
if (cameraRef.current) {
cameraRef.current.setNativeProps({ style: { opacity: 0.99 } });
setTimeout(() => {
cameraRef.current.setNativeProps({ style: { opacity: 1 } });
}, 50); // Revert to 1 after a short delay
}
};
const toggleCameraStyle = () => {
setUseLargeStyle((prev) => !prev); // Toggle between the two styles
};
return (
<View style={styles.contaienr}>
<CameraX
style={useLargeStyle ? styles.camera2 : styles.camera}
imageSource={imageUri}
ref={cameraRef}
/>
<Button title="Pick an Image" onPress={pickImage} />
<TouchableOpacity
onPress={toggleCameraStyle}
style={styles.buttonContainer}
>
<Text style={{ color: "#fff" }}>Toggle Camera Size</Text>
</TouchableOpacity>
</View>
);
};
export default CameraView3;
const styles = StyleSheet.create({
contaienr: {
flex: 1,
alignItems: "center",
justifyContent: "center",
backgroundColor: "#aaa",
},
camera: {
width: 300,
height: 300,
},
camera2: {
width: "80%",
height: "80%",
},
buttonContainer: {
minWidth: 200,
margin: 5,
alignItems: "center",
padding: 30,
backgroundColor: "#000000",
color: "#ffff",
},
});
那个项目的问题与此类似:有时导航到该组件时相机不出画面;在那种情况下,只要按下 toggleCameraStyle 改变容器样式、触发一次重新布局,相机就能正常显示。