How do I use Deepgram in my React Native project? I'm having trouble streaming live audio from the phone to the Deepgram live-transcription API. It throws:

[Error: Unhandled error. (Expected HTTP 101 response but was '401 Unauthorized')]

Here is the code from my (app/index.tsx) that I'm using to set up Deepgram in my React Native Expo app. How do I integrate Deepgram? The code is below.
import { Button, NativeEventEmitter, StyleSheet } from "react-native";
import {
CreateProjectKeyResponse,
LiveClient,
LiveTranscriptionEvents,
createClient,
} from "@deepgram/sdk";
import { Text, View } from "@/components/Themed";
import { Audio } from "expo-av";
import { useEffect, useState } from "react";
export default function TabOneScreen() {
const fetch = require("cross-fetch");
const eventEmitter = new NativeEventEmitter();
const [recording, setRecording] = useState<any>();
const [recordings, setRecordings] = useState<any>([]);
const [key, setKey] = useState<any>([]);
const [permissionResponse, requestPermission] = Audio.usePermissions();
const [apiKey, setApiKey] = useState<CreateProjectKeyResponse | null>();
const [connection, setConnection] = useState<LiveClient | null>();
const [caption, setCaption] = useState<string | null>();
const [isListening, setListening] = useState(false);
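// NOTE: "DEEPGRAM_API_KEY" below is a literal placeholder string, not an environment lookup;
// unless it is replaced with a real Deepgram API key, the WebSocket handshake is rejected with 401 Unauthorized.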
const deepgram = createClient("DEEPGRAM_API_KEY");
async function startRecording() {
try {
if (permissionResponse?.status !== "granted") {
console.log("Requesting permission..");
await requestPermission();
}
await Audio.setAudioModeAsync({
allowsRecordingIOS: true,
playsInSilentModeIOS: true,
});
const { recording } = await Audio.Recording.createAsync(
Audio.RecordingOptionsPresets.HIGH_QUALITY
);
setRecording(recording);
console.log("Recording started");
} catch (err) {
console.error("Failed to start recording", err);
}
}
async function stopRecording() {
setRecording(undefined);
await recording?.stopAndUnloadAsync();
let allRecordings = [...recordings];
const { sound, status } = await recording?.createNewLoadedSoundAsync();
allRecordings.push({
sound: sound,
duration: getDurationFormatted(status.durationMillis),
file: recording.getURI(),
});
setConnection(deepgram.listen.live({ model: "nova", smart_format: true }));
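// NOTE: setConnection is asynchronous, so `connection` below still holds the value from the
// previous render (initially null); these listeners never attach to the newly created client.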
connection?.on(LiveTranscriptionEvents.Open, () => {
connection.on(LiveTranscriptionEvents.Close, () => {
console.log("Connection closed.");
});
connection.on(LiveTranscriptionEvents.Metadata, (data) => {
console.log(data);
});
connection.on(LiveTranscriptionEvents.Transcript, (data) => {
console.log(data);
});
});
setRecordings(allRecordings);
}
function getDurationFormatted(milliseconds: number) {
const minutes = milliseconds / 1000 / 60;
const seconds = Math.round((minutes - Math.floor(minutes)) * 60);
return seconds < 10
? `${Math.floor(minutes)}:0${seconds}`
: `${Math.floor(minutes)}:${seconds}`;
}
function getRecordingLines() {
return recordings.map((recordingLine: any, index: number) => {
return (
<View key={index} style={styles.row}>
<Text style={styles.fill}>
Recording #{index + 1} | {recordingLine.duration}
</Text>
<Button
onPress={async () => await recordingLine.sound.replayAsync()}
title="Play"
></Button>
</View>
);
});
}
function clearRecordings() {
setRecordings([]);
}
return (
<View style={styles.container}>
<Text style={styles.title}>Record Audio</Text>
<View
style={styles.separator}
lightColor="#aae"
darkColor="rgba(255,255,255,0.1)"
/>
<Button
title={recording ? "Stop Recording" : "Start Recording"}
onPress={recording ? stopRecording : startRecording}
/>
{getRecordingLines()}
{recordings.length > 0 ? (
<Button title={"Clear Recordings"} onPress={clearRecordings} />
) : (
<View />
)}
</View>
);
}
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: "center",
justifyContent: "flex-start",
},
title: {
fontSize: 20,
fontWeight: "bold",
},
separator: {
marginVertical: 30,
height: 1,
width: "80%",
},
row: {
flexDirection: "row",
alignItems: "center",
justifyContent: "center",
marginLeft: 10,
marginRight: 40,
},
fill: {
flex: 1,
margin: 15,
},
});
I ran into the same problem. In the end I dropped the deepgram package and called the REST endpoint directly with axios and Buffer:
// At the top of the file:
import axios from 'axios';
import * as FileSystem from 'expo-file-system';

async function stopRecordingAPI() {
setRecording(undefined);
await recording.stopAndUnloadAsync();
console.log('Stopping recording..');
const uri = recording.getURI();
  const info = await FileSystem.getInfoAsync(uri);
  console.log(info);
const fileContent = await readFileAsString(uri);
const Buffer = require('buffer').Buffer;
const buffer = Buffer.from(fileContent, 'base64');
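  // Deepgram's pre-recorded /v1/listen endpoint accepts the raw audio bytes as the request body.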
axios.post(
'https://api.deepgram.com/v1/listen?model=nova-2&smart_format=true&detect_language=true',
buffer,
{
headers: {
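        // Deepgram expects "Token <your API key>"; the value below is a redacted placeholder.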
'Authorization': "Token abcabcabcabcabcabcabcabcabc",
'Content-Type': 'audio/m4a'
}
}
).then((response) => {
console.log(response)
//work with the response
}).catch((error) => {
console.error(error);
  });
}
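readFileAsString above is just a small helper I use to load the recording as a base64 string; here is a minimal sketch of it using expo-file-system (the helper name is my own, not from any SDK):

// Reads a local file URI and returns its contents as a base64-encoded string.
async function readFileAsString(uri: string): Promise<string> {
  return FileSystem.readAsStringAsync(uri, {
    encoding: FileSystem.EncodingType.Base64,
  });
}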