I want to use the Google Speech Recognition REST API from C#. The reason I want to use the REST API rather than the client libraries is that I want to use it from Unity3D, which currently does not support them.
I am using HttpClient for the connection and Newtonsoft Json to serialize the JSON (for now I am working in Windows Forms and will move to Unity once it works in WinForms).
I always get a Bad Request response from Google with no further details, and I noticed that I get the same result if I change the API key to an invalid value.
This is my code:
The classes:
class Speech
{
    public RecognitionConfig config { get; set; }
    public RecognitionAudio audio { get; set; }

    public bool sendToApi(string baseUri, string url, ref string apiResponse)
    {
        try
        {
            HttpClient client = new HttpClient();
            // Update port # in the following line.
            client.BaseAddress = new Uri(baseUri);
            client.DefaultRequestHeaders.Accept.Clear();
            client.DefaultRequestHeaders.Accept.Add(new System.Net.Http.Headers.MediaTypeWithQualityHeaderValue("application/json"));

            //var speechJson = new JavaScriptSerializer().Serialize(certificado);
            string speechJson = JsonConvert.SerializeObject(this);
            var contenido = new StringContent(speechJson, Encoding.UTF8, "application/json");

            HttpResponseMessage response = client.PostAsync(url, contenido).Result;
            if (response.IsSuccessStatusCode)
            {
                string responseJson = response.Content.ReadAsStringAsync().Result;
                apiResponse = responseJson;
            }
            else
            {
                apiResponse = "ERROR " + JsonConvert.DeserializeObject(JsonConvert.SerializeObject(response));
            }
            return true;
        }
        catch (Exception e)
        {
            apiResponse = e.Message;
            return false;
        }
    }
}
class RecognitionConfig
{
    public string encoding { get; set; }
    public int sampleRateHertz { get; set; }
    public string languageCode { get; set; }
    // public int maxAlternatives { get; set; }
    // public bool profanityFilter { get; set; }
    // public List<SpeechContext> speechContexts { get; set; }
    // public bool enableWordTimeOffsets { get; set; }
}

class SpeechContext
{
    public List<string> phrases { get; set; }
}
class RecognitionAudio
{
    public string content { get; set; }
    // public string uri { get; set; }

    public bool setContentBase64FromAudio(string path)
    {
        try
        {
            // Read the audio file and base64-encode it for the "content" field.
            byte[] audioBytes = File.ReadAllBytes(path);
            this.content = System.Convert.ToBase64String(audioBytes);
            return true;
        }
        catch (Exception)
        {
            return false;
        }
    }
}
The call:
private void button1_Click(object sender, EventArgs e)
{
    Speech speech = new Speech();
    speech.config = new RecognitionConfig();
    speech.config.encoding = "FLAC";
    speech.config.sampleRateHertz = 44100;
    speech.config.languageCode = "en-US";

    RecognitionAudio audio = new RecognitionAudio();
    audio.setContentBase64FromAudio("C:\\Users\\Manena\\Downloads\\good-morning-google.flac");
    speech.audio = audio;

    string response = "";
    speech.sendToApi("https://speech.googleapis.com/", "v1/speech:recognize?key=<mykey>", ref response);
    textBox1.Text = response;
}
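A side note on debugging: the error branch in sendToApi serializes the whole HttpResponseMessage instead of reading its body, which is why no details show up. Reading response.Content in the failure case should surface Google's error JSON, which usually contains a code, message and status explaining the 400. A minimal sketch of that tweak, using the same blocking .Result style as the code above:

else
{
    // On failure Google returns a JSON error object in the body,
    // e.g. { "error": { "code": 400, "message": "...", "status": "..." } }.
    string errorJson = response.Content.ReadAsStringAsync().Result;
    apiResponse = "ERROR " + errorJson;
}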
EDIT: This is the JSON I am sending:
{
    "config": {
        "encoding": "FLAC",
        "sampleRateHertz": 44100,
        "languageCode": "en-US"
    },
    "audio": {
        "content": "base64 audio"
    }
}
And this is what I receive:
{
"Version": {
"Major": 1,
"Minor": 1,
"Build": -1,
"Revision": -1,
"MajorRevision": -1,
"MinorRevision": -1
},
"Content": {
"Headers": [
{
"Key": "Content-Type",
"Value": [
"application/json; charset=UTF-8"
]
}
]
},
"StatusCode": 400,
"ReasonPhrase": "Bad Request",
"Headers": [
{
"Key": "Vary",
"Value": [
"X-Origin",
"Referer",
"Origin",
"Accept-Encoding"
]
},
{
"Key": "X-XSS-Protection",
"Value": [
"1; mode=block"
]
},
{
"Key": "X-Frame-Options",
"Value": [
"SAMEORIGIN"
]
},
{
"Key": "X-Content-Type-Options",
"Value": [
"nosniff"
]
},
{
"Key": "Alt-Svc",
"Value": [
"hq=\":443\"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=\":443\"; ma=2592000; v=\"41,39,38,37,35\""
]
},
{
"Key": "Transfer-Encoding",
"Value": [
"chunked"
]
},
{
"Key": "Accept-Ranges",
"Value": [
"none"
]
},
{
"Key": "Cache-Control",
"Value": [
"private"
]
},
{
"Key": "Date",
"Value": [
"Sat, 30 Dec 2017 09:06:19 GMT"
]
},
{
"Key": "Server",
"Value": [
"ESF"
]
}
],
"RequestMessage": {
"Version": {
"Major": 1,
"Minor": 1,
"Build": -1,
"Revision": -1,
"MajorRevision": -1,
"MinorRevision": -1
},
"Content": {
"Headers": [
{
"Key": "Content-Type",
"Value": [
"application/json; charset=utf-8"
]
},
{
"Key": "Content-Length",
"Value": [
"106"
]
}
]
},
"Method": {
"Method": "POST"
},
"RequestUri": "https://speech.googleapis.com/v1/speech:recognize?key=mykey",
"Headers": [
{
"Key": "Accept",
"Value": [
"application/json"
]
}
],
"Properties": {}
},
"IsSuccessStatusCode": false
}
I know my code is not the most elegant, but right now I am only interested in getting a good response from the Google API. Any clues?
I have solved the problem.
The problem was that I was using a 2-channel audio file, and the Google Speech API currently only accepts mono audio.
So the code in the question works with 1-channel audio; it may be useful to someone.
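For reference, the stereo file has to be converted to mono before it is base64-encoded. Below is a minimal sketch of doing that in C# with the NAudio package; NAudio, the method name and the file paths are my own assumptions, the input is assumed to be a 16-bit stereo PCM WAV (a FLAC file would first need decoding), and a tool such as Audacity or sox can do the same conversion.

using NAudio.Wave;

static void StereoWavToMono(string inputPath, string outputPath)
{
    using (var reader = new WaveFileReader(inputPath)) // expects 16-bit stereo PCM
    {
        var toMono = new StereoToMonoProvider16(reader)
        {
            LeftVolume = 0.5f,  // average the two channels instead of summing,
            RightVolume = 0.5f  // to avoid clipping
        };
        WaveFileWriter.CreateWaveFile(outputPath, toMono); // writes a 1-channel WAV
    }
}

If the converted file is sent as WAV/PCM rather than FLAC, the request config would use "LINEAR16" as the encoding instead of "FLAC".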
Thanks.