Camera2 API: code block in ImageReader.setOnImageAvailableListener() never executes


I am building an Android app with the Camera2 API and Jetpack Compose that captures an image when a button is pressed. My goal is to show the camera preview in a TextureView, capture a JPEG image with an ImageReader, and, if the device supports it, also capture DEPTH16 depth data. However, I have run into a problem: the code block inside ImageReader.setOnImageAvailableListener() never executes, so I can never retrieve the captured image.

My code is as follows:

package com.example.tutorial.camera

import android.annotation.SuppressLint
import android.content.ContentValues
import android.content.Context
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.ImageFormat
import android.graphics.SurfaceTexture
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.hardware.camera2.CaptureRequest
import android.media.ImageReader
import android.os.Environment
import android.os.Handler
import android.os.Looper
import android.provider.MediaStore
import android.util.Log
import android.util.Size
import android.view.SurfaceHolder
import android.view.TextureView
import androidx.compose.foundation.layout.*
import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.*
import androidx.compose.ui.viewinterop.AndroidView
import java.io.File
import java.io.FileOutputStream
import java.lang.Exception

@Composable
fun CamPreview(
    onImageCaptured: (Bitmap) -> Unit,
    onImageDCaptured: (ByteArray) -> Unit,
    onDismiss: () -> Unit
) {
    var cameraHandler: CameraHandler? = remember { null }
    var errorMessage by remember { mutableStateOf<String?>(null) }

    // Display camera preview using TextureView
    AndroidView(factory = { context ->
        TextureView(context).apply {
            // Use SurfaceTextureListener to ensure Surface is created
            surfaceTextureListener = object : TextureView.SurfaceTextureListener {
                override fun onSurfaceTextureAvailable(surface: SurfaceTexture, width: Int, height: Int) {
                    cameraHandler =
                        CameraHandler(
                            context = context,
                            view = this@apply,
                            onImageCaptured = { bitmap ->
                                Log.d("CameraPreview", "Image captured successfully: ${bitmap.width}x${bitmap.height}")
                                onImageCaptured(bitmap)
                            },
                            onImageDCaptured = onImageDCaptured,
                            onDismiss = onDismiss,
                            onError = { message -> errorMessage = message })
                    cameraHandler?.startCamera()
                }

                override fun onSurfaceTextureSizeChanged(surface: SurfaceTexture, width: Int, height: Int) {}
                override fun onSurfaceTextureDestroyed(surface: SurfaceTexture): Boolean {
                    cameraHandler?.stopCam()
                    onDismiss()
                    return true
                }

                override fun onSurfaceTextureUpdated(surface: SurfaceTexture) {}
            }
        }
    })

    // Display error alert box
    errorMessage?.let { message ->
        AlertDialog(
            onDismissRequest = { errorMessage = null; },
            title = { Text("Notice") },
            text = { Text(message) },
            confirmButton = {
                Button(onClick = { errorMessage = null; }) { Text("Confirm") }
            }
        )
    }

    // Capture button at the bottom of the screen
    Box(modifier = Modifier.fillMaxSize()) {
        Button(
            onClick = {
                cameraHandler?.takePic()
                onDismiss()  // Close camera preview after capturing
            },
            modifier = Modifier.align(Alignment.BottomCenter)
        ) {
            Text("Capture")
        }
    }
}

class CameraHandler(
    private val context: Context,
    private val view: TextureView,
    private val onImageCaptured: (Bitmap) -> Unit,
    private val onImageDCaptured: (ByteArray) -> Unit,
    private val onDismiss: () -> Unit,  // Use onDismiss to close camera preview
    private val onError: (String) -> Unit
) {
    val camManager = context.getSystemService(Context.CAMERA_SERVICE) as CameraManager
    lateinit var camDevice: CameraDevice
    lateinit var captSession: CameraCaptureSession
    lateinit var imgReader: ImageReader
    lateinit var depImgReader: ImageReader
    lateinit var handler: Handler
    lateinit var captReq: CaptureRequest.Builder

    val camId by mutableStateOf(getDepSupportedCamId())
    var supportsDep: Boolean = false

    private fun getDepSupportedCamId(): String? {
        for (cameraId in camManager.cameraIdList) {
            val characteristics = camManager.getCameraCharacteristics(cameraId)
            // Check the camera's facing direction
            val facing = characteristics.get(CameraCharacteristics.LENS_FACING)
            // Check if the device supports DEPTH16 format
            val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
            if (capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true) {
                // Prefer rear-facing camera
                if (facing == CameraCharacteristics.LENS_FACING_BACK) {
                    return cameraId
                }
            }
        }
        // If no rear-facing camera found, return another camera ID that supports DEPTH16
        for (cameraId in camManager.cameraIdList) {
            val characteristics = camManager.getCameraCharacteristics(cameraId)
            val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
            if (capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true) {
                return cameraId
            }
        }
        // If no camera supports DEPTH16, return rear-facing camera ID
        for (cameraId in camManager.cameraIdList) {
            val characteristics = camManager.getCameraCharacteristics(cameraId)
            val facing = characteristics.get(CameraCharacteristics.LENS_FACING)
            if (facing == CameraCharacteristics.LENS_FACING_BACK) {
                return cameraId
            }
        }
        return null
    }

    @SuppressLint("MissingPermission")
    fun startCamera() {

        if (camId == null) {
            showError("No usable camera")
            return
        }

        val characteristics = camManager.getCameraCharacteristics(camId!!)
        val capabilities = characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
        supportsDep = capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true
        //if(!supportsDepth)showError("This device doesn't support DEPTH16")

        try {
            handler = Handler(Looper.getMainLooper())

            // Open camera
            camManager.openCamera(camId!!, object : CameraDevice.StateCallback() {
                override fun onOpened(device: CameraDevice) {
                    camDevice = device
                    createCameraPreviewSession()
                }

                override fun onDisconnected(device: CameraDevice) {
                    device.close()
                    onDismiss()  // Close camera preview on disconnection
                }

                override fun onError(device: CameraDevice, error: Int) {
                    device.close()
                    showError("Camera Error: $error")
                }
            }, handler)
        } catch (e: Exception) {
            showError("Failed to start camera: ${e.message}")
        }
    }

    private fun createCameraPreviewSession() {
        try {
            // Configure preview with camera's best size
            val characteristics = camId?.let { camManager.getCameraCharacteristics(it) }
            val map = characteristics?.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
            val previewSize =
                map?.getOutputSizes(SurfaceHolder::class.java)?.maxByOrNull { it.width * it.height } ?: Size(640, 480)
            val width = previewSize.width
            val height = previewSize.height

            val texture = view.surfaceTexture ?: throw IllegalStateException("SurfaceTexture is null")
            texture.setDefaultBufferSize(width, height)
            val previewSurface = android.view.Surface(texture)

            //Log.d("CameraHandler", "")
            Log.d("CameraHandler", "Initializing ImageReader with width=$width, height=$height")
            imgReader = ImageReader.newInstance(width, height, ImageFormat.JPEG, 2)
            if (supportsDep) {
                Log.d("CameraHandler", "Initializing DepthImageReader with width=$width, height=$height")
                depImgReader = ImageReader.newInstance(width, height, ImageFormat.DEPTH16, 2)
            }

            // Create CaptureSession
            camDevice.createCaptureSession(
                listOf(previewSurface),
                object : CameraCaptureSession.StateCallback() {
                    override fun onConfigured(session: CameraCaptureSession) {
                        captSession = session
                        captReq = camDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
                        captReq.addTarget(previewSurface)
                        captSession.setRepeatingRequest(captReq.build(), null, null)
                    }

                    override fun onConfigureFailed(session: CameraCaptureSession) {
                        showError("Failed to configure camera session")
                    }
                },
                null
            )
        } catch (e: Exception) {
            showError("Failed to create preview session: ${e.message}")
        }
    }

    fun takePic() {
        try {
            captReq = camDevice.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE)
            captReq.addTarget(imgReader.surface)
            Log.d("CameraHandler", "At least tried successfully")
            imgReader.setOnImageAvailableListener({ reader ->
                val image = reader.acquireLatestImage()
                if (image != null) {  // Check if image was successfully captured
                    Log.d("CameraHandler", "Image captured")
                    val buffer = image.planes[0].buffer
                    val bytes = ByteArray(buffer.remaining())
                    buffer.get(bytes)

                    val contentValues = ContentValues().apply {
                        put(MediaStore.Images.Media.DISPLAY_NAME, "img.jpeg")
                        put(MediaStore.Images.Media.MIME_TYPE, "image/jpeg")
                        put(MediaStore.Images.Media.RELATIVE_PATH, Environment.DIRECTORY_PICTURES)
                    }
                    val uri = context.contentResolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, contentValues)
                    uri?.let {
                        context.contentResolver.openOutputStream(it)?.use { outputStream ->
                            outputStream.write(bytes)
                        }
                    }

                    val bitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.size)
                    Log.d("CameraPreview", "Image captured successfully: ${bitmap.width}x${bitmap.height}")
                    onImageCaptured(bitmap)
                    image.close()
                } else {
                    Log.d("CameraHandler", "Failed to capture Image")
                    showError("Failed to acquire image from ImageReader")
                }
            }, handler)
            // Check if depth image needs to be captured
            if (supportsDep) {
                captReq.addTarget(depImgReader.surface)
                depImgReader.setOnImageAvailableListener({ depthReader ->
                    val depthImage = depthReader.acquireLatestImage()
                    val depthBuffer = depthImage.planes[0].buffer
                    val depthBytes = ByteArray(depthBuffer.capacity())
                    depthBuffer.get(depthBytes)

                    // Invoke callback for depth image
                    onImageDCaptured(depthBytes)
                    depthImage.close()
                }, handler)
            }
            captSession.capture(captReq.build(), null, null)
            Log.d("CameraHandler", "Capture request for image sent")
        } catch (e: Exception) {
            showError("Failed to take picture: ${e.message}")
        }
    }

    // Display error popup
    fun showError(message: String) {
        onError(message)
    }

    fun stopCam() {
        try {
            // Stop capture session
            captSession.close()
            // Close camera device
            camDevice.close()
            // Release ImageReader
            imgReader.close()
            // If there is depthImageReader, release it
            if (supportsDep) depImgReader.close()
        } catch (e: Exception) {
            onError("Error closing camera: ${e.message}")
        }
    }
}

I expect the listener set with ImageReader.setOnImageAvailableListener() to run its code block after every capture request so that I can retrieve and process the image. To troubleshoot, I have verified that the TextureView, imgReader, and captSession are initialized correctly, and I made sure the camera permission is granted before starting the camera. I have also tried different buffer sizes for the ImageReader and confirmed that captReq includes imgReader.surface as a target. The logs show that takePic() starts the capture request and targets imgReader.surface, but acquireLatestImage() never retrieves an image because the listener never runs. No exceptions or errors are thrown, and I end up with no image data.

What could cause the listener in ImageReader.setOnImageAvailableListener() to never execute its code block, and what steps can I take to troubleshoot this further? Is there something specific about the Camera2 CaptureRequest or ImageReader setup that I might be overlooking?
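One diagnostic I could still add is sketched below (only a rough idea: the overrides come from the standard CameraCaptureSession.CaptureCallback, and the log messages are placeholders of my own). The idea is to pass a CaptureCallback and the handler to captSession.capture() instead of null so the logs show whether the still capture ever completes or fails:

    // Rough diagnostic sketch, meant to live inside CameraHandler.takePic().
    // Requires importing android.hardware.camera2.CaptureFailure and
    // android.hardware.camera2.TotalCaptureResult.
    val captureCallback = object : CameraCaptureSession.CaptureCallback() {
        override fun onCaptureCompleted(
            session: CameraCaptureSession,
            request: CaptureRequest,
            result: TotalCaptureResult
        ) {
            Log.d("CameraHandler", "Still capture completed")
        }

        override fun onCaptureFailed(
            session: CameraCaptureSession,
            request: CaptureRequest,
            failure: CaptureFailure
        ) {
            Log.e("CameraHandler", "Still capture failed, reason=${failure.reason}")
        }
    }

    // Pass the callback and the background handler instead of null:
    captSession.capture(captReq.build(), captureCallback, handler)

If onCaptureFailed fires, the failure reason should at least narrow things down.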

By the way, I have tried running this app on several different phones that I am fairly sure should support depth output, but none of them report support for DEPTH16 images. I don't know whether these phones really don't support this image format or whether there is something wrong with the way I am querying for it.
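For what it's worth, a small check like the sketch below (using the camManager field from my CameraHandler; the log tag is just a placeholder) should show what each camera actually reports. I have also read that some devices only expose the depth sensor as a physical camera behind a logical camera ID, so it may not appear in cameraIdList at all:

    // Sketch: log every camera ID, whether it advertises DEPTH_OUTPUT, and the
    // DEPTH16 output sizes (null if the format is not a supported output).
    for (id in camManager.cameraIdList) {
        val chars = camManager.getCameraCharacteristics(id)
        val caps = chars.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
        val hasDepth = caps?.contains(
            CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT
        ) == true
        val depthSizes = chars.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
            ?.getOutputSizes(ImageFormat.DEPTH16)
        Log.d("DepthCheck", "camera=$id depthCapability=$hasDepth depth16Sizes=${depthSizes?.joinToString()}")
    }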

Thank you for your help!

Reply to dev.bmax's comment: I tried changing the code to the newer version, but it still doesn't work. Would you mind giving some clearer suggestions? Thanks.

android kotlin android-camera2
1 Answer

It looks like you are not adding the other output streams when you configure the camera session.

You could try something like this:

camDevice.createCaptureSession( listOf(previewSurface, imgReader.getSurface()... ) ...
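Expanded a bit with the names from your createCameraPreviewSession(), the session setup could look roughly like this (only a sketch; note that a preview + JPEG + DEPTH16 combination is not in the guaranteed stream-configuration tables for every device, so onConfigureFailed can still fire on some hardware):

    // Register every surface that any CaptureRequest will target,
    // not just the preview surface.
    val outputSurfaces = mutableListOf(previewSurface, imgReader.surface)
    if (supportsDep) {
        outputSurfaces.add(depImgReader.surface)
    }

    camDevice.createCaptureSession(
        outputSurfaces,
        object : CameraCaptureSession.StateCallback() {
            override fun onConfigured(session: CameraCaptureSession) {
                captSession = session
                captReq = camDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
                captReq.addTarget(previewSurface)
                captSession.setRepeatingRequest(captReq.build(), null, null)
            }

            override fun onConfigureFailed(session: CameraCaptureSession) {
                showError("Failed to configure camera session")
            }
        },
        null
    )

Any surface that takePic() later calls addTarget() on has to be in that list; a request that targets a surface the session was never configured with cannot deliver frames, which would explain why the listener never fires.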
