React-Native:为语音助手开发 React-Native Android 应用程序 |语音激活错误(file-build.gradle,index.ts)

问题描述 投票:0回答:1

我正在尝试在 react-native 中开发语音助手,但即使在我安装了 @react-native-community/voice 之后,仍然出现错误:Build file 'E:\react_native_projects\VoxAI\node_modules\react-native-voice\android\build.gradle' line: 59

  • 出了什么问题: 评估项目“:react-native-voice”时出现问题。

在 org.gradle.api.internal.artifacts.dsl.dependency.DefaultDependencyHandler 类型的对象上找不到参数 [directory 'libs'] 的compile()方法。

尝试让麦克风工作,但最终在 startRecording 和 stopRecording 中出现错误

build.gradle

apply plugin: 'com.android.library'

repositories {
    mavenLocal()
    // google() hosts the Android Gradle Plugin / support artifacts for AGP 3.x+;
    // jcenter() is kept for legacy artifacts but is read-only/deprecated.
    google()
    mavenCentral()
    jcenter()
    maven {
        // For developing the library outside the context of the example app, expect `react-native`
        // to be installed at `./node_modules`.
        url "$projectDir/../node_modules/react-native/android"
    }
    maven {
        // For developing the example app.
        url "$projectDir/../../react-native/android"
    }
}

def DEFAULT_COMPILE_SDK_VERSION = 23
def DEFAULT_BUILD_TOOLS_VERSION = "23.0.2"
def DEFAULT_TARGET_SDK_VERSION = 23
def DEFAULT_SUPPORT_LIB_VERSION = "23.1.1"

android {
    // Allow the host app's rootProject to override SDK/tooling versions; fall back to defaults.
    compileSdkVersion rootProject.hasProperty('compileSdkVersion') ? rootProject.compileSdkVersion : DEFAULT_COMPILE_SDK_VERSION
    buildToolsVersion rootProject.hasProperty('buildToolsVersion') ? rootProject.buildToolsVersion : DEFAULT_BUILD_TOOLS_VERSION

    defaultConfig {
        minSdkVersion 15
        targetSdkVersion rootProject.hasProperty('targetSdkVersion') ? rootProject.targetSdkVersion : DEFAULT_TARGET_SDK_VERSION
        versionCode 1
        versionName "1.0"
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }
}

buildscript {
    repositories {
        google()
        mavenCentral()
        jcenter()
    }
    dependencies {
        // FIX: AGP 1.5.0 predates the implementation/testImplementation configurations
        // used below; 3.x+ is required (and is what current Gradle versions support).
        classpath 'com.android.tools.build:gradle:3.5.4'
    }
}

allprojects {
    repositories {
        google()
        mavenCentral()
        jcenter()
    }
}

def supportVersion = rootProject.hasProperty('supportLibVersion') ? rootProject.supportLibVersion : DEFAULT_SUPPORT_LIB_VERSION

dependencies {
    // FIX: `compile`/`testCompile` were removed in Gradle 7 — this is the cause of
    // "Could not find method compile() for arguments [directory 'libs']".
    implementation fileTree(dir: 'libs', include: ['*.jar'])
    testImplementation 'junit:junit:4.12'
    // FIX: Groovy only interpolates ${...} inside DOUBLE quotes; the original single-quoted
    // string shipped the literal text "${supportVersion}" as the artifact version.
    implementation "com.android.support:appcompat-v7:${supportVersion}"
    implementation 'com.facebook.react:react-native:+'
}

index.ts

import { NativeModules, NativeEventEmitter, Platform } from 'react-native';
import invariant from 'invariant';
import {
  VoiceModule,
  SpeechEvents,
  SpeechRecognizedEvent,
  SpeechErrorEvent,
  SpeechResultsEvent,
  SpeechStartEvent,
  SpeechEndEvent,
  SpeechVolumeChangeEvent,
} from './VoiceModuleTypes';

// Handle to the native speech-recognition bridge module (implemented per-platform).
const Voice = NativeModules.Voice as VoiceModule;

// NativeEventEmitter is only available on React Native platforms, so this conditional
// is used to avoid import conflicts in the browser/server (null on 'web').
const voiceEmitter =
  Platform.OS !== 'web' ? new NativeEventEmitter(Voice) : null;
// Union of the event-name keys ('onSpeechStart' | 'onSpeechEnd' | ...) used to register listeners.
type SpeechEvent = keyof SpeechEvents;

/**
 * JS facade over the native speech-recognition module.
 *
 * Handlers are assigned through the `onSpeech*` setters; native events are
 * forwarded to them via listeners registered lazily on the first `start()`.
 */
class RCTVoice {
  _loaded: boolean;
  // Subscriptions returned by voiceEmitter.addListener; null until start() registers them.
  _listeners: any[] | null;
  _events: Required<SpeechEvents>;

  constructor() {
    this._loaded = false;
    this._listeners = null;
    // Default every handler to a no-op so event dispatch never needs a null check.
    this._events = {
      onSpeechStart: () => {},
      onSpeechRecognized: () => {},
      onSpeechEnd: () => {},
      onSpeechError: () => {},
      onSpeechResults: () => {},
      onSpeechPartialResults: () => {},
      onSpeechVolumeChanged: () => {},
    };
  }

  // NOTE(review): this clears handler slots on the native module object itself,
  // not this._events (which the setters below write to) — confirm intended.
  removeAllListeners() {
    Voice.onSpeechStart = undefined;
    Voice.onSpeechRecognized = undefined;
    Voice.onSpeechEnd = undefined;
    Voice.onSpeechError = undefined;
    Voice.onSpeechResults = undefined;
    Voice.onSpeechPartialResults = undefined;
    Voice.onSpeechVolumeChanged = undefined;
  }

  /**
   * Destroys the native recognizer and removes all emitter subscriptions.
   * Resolves immediately when nothing was ever started.
   */
  destroy(): Promise<void> {
    if (!this._loaded && !this._listeners) {
      return Promise.resolve();
    }
    // FIX: Promise<void> — under strict TS, resolve() with no argument is an
    // error unless the promise is explicitly void-typed.
    return new Promise<void>((resolve, reject) => {
      Voice.destroySpeech((error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          if (this._listeners) {
            // forEach, not map: only the remove() side effect is wanted.
            this._listeners.forEach(listener => listener.remove());
            this._listeners = null;
          }
          resolve();
        }
      });
    });
  }

  /**
   * Starts listening for speech.
   * @param locale BCP-47 locale tag, e.g. 'en-GB'.
   * @param options (Android) extra RecognizerIntent options merged over the defaults.
   */
  start(locale: string, options = {}): Promise<void> {
    if (!this._loaded && !this._listeners && voiceEmitter !== null) {
      this._listeners = (Object.keys(this._events) as SpeechEvent[]).map(
        (key: SpeechEvent) =>
          // FIX: dispatch through this._events at call time so a handler assigned
          // via the setters *after* start() is still invoked. Binding
          // this._events[key] directly would freeze the initial no-op.
          voiceEmitter.addListener(key, (event: any) =>
            (this._events[key] as (e: any) => void)(event),
          ),
      );
    }

    return new Promise<void>((resolve, reject) => {
      const callback = (error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      };
      if (Platform.OS === 'android') {
        Voice.startSpeech(
          locale,
          // Android takes RecognizerIntent extras; callers may override any default.
          Object.assign(
            {
              EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
              EXTRA_MAX_RESULTS: 5,
              EXTRA_PARTIAL_RESULTS: true,
              REQUEST_PERMISSIONS_AUTO: true,
            },
            options,
          ),
          callback,
        );
      } else {
        Voice.startSpeech(locale, callback);
      }
    });
  }

  /** Stops an active recognition session; no-op when never started. */
  stop(): Promise<void> {
    if (!this._loaded && !this._listeners) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.stopSpeech(error => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }

  /** Cancels an active recognition session; no-op when never started. */
  cancel(): Promise<void> {
    if (!this._loaded && !this._listeners) {
      return Promise.resolve();
    }
    return new Promise<void>((resolve, reject) => {
      Voice.cancelSpeech(error => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve();
        }
      });
    });
  }

  /** Resolves 1 when speech recognition is available on this device, 0 otherwise. */
  isAvailable(): Promise<0 | 1> {
    return new Promise((resolve, reject) => {
      Voice.isSpeechAvailable((isAvailable: 0 | 1, error: string) => {
        if (error) {
          reject(new Error(error));
        } else {
          resolve(isAvailable);
        }
      });
    });
  }

  /**
   * (Android) Get a list of the speech recognition engines available on the device
   * */
  getSpeechRecognitionServices() {
    if (Platform.OS !== 'android') {
      invariant(
        Voice,
        'Speech recognition services can be queried for only on Android',
      );
      return;
    }

    return Voice.getSpeechRecognitionServices();
  }

  /** Resolves 1 while a recognition session is in progress, 0 otherwise. */
  isRecognizing(): Promise<0 | 1> {
    return new Promise(resolve => {
      Voice.isRecognizing((isRecognizing: 0 | 1) => resolve(isRecognizing));
    });
  }

  set onSpeechStart(fn: (e: SpeechStartEvent) => void) {
    this._events.onSpeechStart = fn;
  }

  set onSpeechRecognized(fn: (e: SpeechRecognizedEvent) => void) {
    this._events.onSpeechRecognized = fn;
  }
  set onSpeechEnd(fn: (e: SpeechEndEvent) => void) {
    this._events.onSpeechEnd = fn;
  }
  set onSpeechError(fn: (e: SpeechErrorEvent) => void) {
    this._events.onSpeechError = fn;
  }
  set onSpeechResults(fn: (e: SpeechResultsEvent) => void) {
    this._events.onSpeechResults = fn;
  }
  set onSpeechPartialResults(fn: (e: SpeechResultsEvent) => void) {
    this._events.onSpeechPartialResults = fn;
  }
  set onSpeechVolumeChanged(fn: (e: SpeechVolumeChangeEvent) => void) {
    this._events.onSpeechVolumeChanged = fn;
  }
}

// FIX: these are type-only symbols; `export type` keeps the file valid under
// isolatedModules/Babel transpilers, which cannot tell that no runtime value exists.
export type {
  SpeechEndEvent,
  SpeechErrorEvent,
  SpeechEvents,
  SpeechStartEvent,
  SpeechRecognizedEvent,
  SpeechResultsEvent,
  SpeechVolumeChangeEvent,
};
// Singleton instance — the module is used as a shared stateful service.
export default new RCTVoice();

HomeScreen.js

import { View, Text, Image, ScrollView, TouchableOpacity } from 'react-native'
import React, { useEffect, useState } from 'react';
import { widthPercentageToDP as wp, heightPercentageToDP as hp } from 'react-native-responsive-screen';
import { SafeAreaView } from 'react-native-safe-area-context'
import Features from '../components/features';
import {dummyMessages } from '../constants/data';
import Voice from '@react-native-community/voice';

export default function HomeScreen() {
  const [messages,setMessages]=useState(dummyMessages);
  const [recording,setRecording]=useState(false);
  const [speaking,setSpeaking]=useState(true);
  const [result,setResult] = useState('');

  const clear=()=>{
    setMessages([]);
  }

  const stopSpeaking=()=>{
    setSpeaking(false);
  }

  const speechStartHandler=e=>{
    console.log('speech start handler')
  }
  const speechEndHandler=e=>{
    setRecording(false);
    console.log('speech end handler')
  }
  const speechResultsHandler=e=>{
    console.log('voice event',e);
    const text=e.value[0];
    setResult(text);
  }

  const speechErrorHandler=e=>{
    console.log('speech error handler',e)
  }
  const startRecording = async()=>{
    setRecording(true);
    try{
      await Voice.start('en-GB');
    }catch(error){
      console.log('error in startRecording' ,error);
    }
  }
  const stopRecording = async()=>{
  
    try{
      await stop();
      setRecording(false);
    }catch(error){
      console.log('error in stop recoring',error);
    }
  }

  useEffect(()=>{
    Voice.onSpeechStart = speechStartHandler;
    Voice.onSpeechEnd = speechEndHandler;
    Voice.onSpeechResults = speechResultsHandler;
    Voice.onSpeechError=speechErrorHandler;

    return()=>{
      Voice.destroy().then(Voice.removeAllListeners);
    }
  },[])
  return (
    <View className="flex-1 bg-white">
      <SafeAreaView className="flex-1 flex mx-5">
        <View className="flex-row justify-center">
          <Image source={require('../../assets/images/bot.png')} style={{width:wp(25),height:wp(25)}}/>
        </View>
        {
          messages.length>0?(
            <View className="space-t-2 flex-1">
                <Text style={{fontSize:wp(5)}} className="text-gray-700 font-semibold ml-1">Assistant
                </Text>
                <View  style={{height:hp(58)}}
                className="bg-neutral-200 rounded-3xl p-4">
                <ScrollView
                bounces={false}
                className="space-y-4"
                showsVerticalScrollIndicator={false}
                >
                  {
                    messages.map((message,index)=>{
                        if(message.role=='assistant'){
                          if(message.content.includes('https')){
                            return(
                              <View key={index} className="flex-row justify-start">
                                <View className="p-2 flex rounded-2xl bg-emerald-100 rounded-tl-none">
                                  <Image 
                                  source={{uri:message.content}}
                                  className="rounded-2xl"
                                  resizeMode="contain"
                                  style={{height: wp(60), width:wp(60)}}/>
                                </View>
                              </View>
                            )
                          }else{
                            return(
                                <View
                                key={index} style={{width:wp(70)}} className="bg-emerald-100 rounded-xl p-2 rounded-tl-none">
                                  <Text>
                                    {message.content}
                                  </Text>
                                </View>
                            )
                          }
                        }else{
                          return(
                            <View key={index} className="flex-row justify-end">
                              <View style={{width:wp(70)}} className="bg-white rounded-xl p-2 rounded-tr-none">
                                <Text>
                                  {message.content}
                                </Text>
                              </View>

                            </View>
                          )
                        }
                    })
                  }
                </ScrollView>
                </View>
            </View>
          ):(
            <Features/>
          )
        }
        <View className="flex justify-center items-center">
         {
          recording?(
            <TouchableOpacity onPress={stopRecording}>
            <Image 
            className="rounded-full"
            source={require("../../assets/images/voiceLoading.gif")}
            style={{width:hp(10),height:hp(10)}}
            />
          </TouchableOpacity>
          ):(
            <TouchableOpacity onPress={startRecording}>
            <Image 
            className="rounded-full"
            source={require("../../assets/images/recordingIcon.png")}
            style={{width:hp(10),height:hp(10)}}
            />
          </TouchableOpacity>
          )
         }
         {
          messages.length>0&&(
            <TouchableOpacity
            onPress={clear} 
            className="bg-neutral-400 rounded-3xl p-2 absolute right-10">
              <Text className="text-white font-semibold">Clear</Text>

            </TouchableOpacity>
          )
         }
         {
          speaking&&(
            <TouchableOpacity
            onPress={stopSpeaking} 
            className="bg-red-400 rounded-3xl p-2 absolute left-10">
              <Text className="text-white font-semibold">Stop</Text>

            </TouchableOpacity>
          )
         }
        </View>
      </SafeAreaView>
    </View>
  )
}

WelcomeScreen.js

import { View, Text, Image, TouchableOpacity } from 'react-native'
import React from 'react'
import { SafeAreaView } from 'react-native-safe-area-context'

import { widthPercentageToDP as wp, heightPercentageToDP as hp } from 'react-native-responsive-screen';
import { useNavigation } from '@react-navigation/native';

export default function WelcomeScreen() {
  const navigation=useNavigation();
  return (
    <SafeAreaView className='flex-1 flex justify-around bg-white'>
      <View className='space-y-2'>
        <Text className='text-center tracking-wide text-gray-700 font-extrabold'>
          AYGEN
        </Text>
        <Text className='text-center font-thin text-4xl text-gray-500'>
          Future power
        </Text>
      </View>
      <View className="flex-row justify-center">
        <Image source={require('../../assets/images/welcome.png') } style={{width:wp(75),height:wp(75)}} />
      </View>
      <TouchableOpacity onPress={()=>navigation.navigate('Home')} className="bg-emerald-600 mx-9 p-4 rounded-2xl">
        <Text style={{fontSize:wp(6)}}className="text-center font-bold text-white text-2xl">Get Started</Text>
      </TouchableOpacity>
    </SafeAreaView>
  )
}

android react-native voice-recognition recording
1个回答
0
投票

看起来是在 libs 下寻找 jar 文件。您是否将此 jar 文件放在正确的位置?关于麦克风——该应用程序是否确实收到了录音权限?如果没有,请在安装 react-native-permissions 后尝试使用以下代码:

import { check, request, PERMISSIONS, RESULTS } from 'react-native-permissions';
const permission = Platform.OS === 'ios' ? PERMISSIONS.IOS.MICROPHONE : PERMISSIONS.ANDROID.RECORD_AUDIO;
await request(permission);

顺便说一句,您正在构建什么样的语音帮助?很长一段时间我试图在 React Native 中找到“语音激活”又名“关键字激活”、“短语识别”、“唤醒词”、“唤醒词检测”,但找不到任何东西。 我最终构建了自己的并将其发布在: https://github.com/frymanofer/ReactNative_WakeWordDetection 该工作仍在进行中。

© www.soinside.com 2019 - 2024. All rights reserved.