I'm trying to develop my own Alexa skill. When I type the request into the Alexa web simulator, it finds and recognizes my intent, but when I try it from my mobile app (signed in with the same account I use in the web console), it does not recognize my intent. My code is as follows:
# -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging

import ask_sdk_core.utils as ask_utils

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput

from ask_sdk_model import Response

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class LaunchRequestHandler(AbstractRequestHandler):
    """Handler for Skill Launch."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("LaunchRequest")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Welcome, you can say Hello or Help. Which would you like to try?"

        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )


class HelloWorldIntentHandler(AbstractRequestHandler):
    """Handler for Hello World Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("HelloWorldIntent")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Hello World!"

        return (
            handler_input.response_builder
                .speak(speak_output)
                # .ask("add a reprompt if you want to keep the session open for the user to respond")
                .response
        )


class HelpIntentHandler(AbstractRequestHandler):
    """Handler for Help Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "You can say hello to me! How can I help?"

        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )


class CancelOrStopIntentHandler(AbstractRequestHandler):
    """Single handler for Cancel and Stop Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
                ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        speak_output = "Goodbye!"

        return (
            handler_input.response_builder
                .speak(speak_output)
                .response
        )


class FallbackIntentHandler(AbstractRequestHandler):
    """Single handler for Fallback Intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        logger.info("In FallbackIntentHandler")
        speech = "Hmm, I'm not sure. You can say Hello or Help. What would you like to do?"
        reprompt = "I didn't catch that. What can I help you with?"

        return handler_input.response_builder.speak(speech).ask(reprompt).response


class SessionEndedRequestHandler(AbstractRequestHandler):
    """Handler for Session End."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("SessionEndedRequest")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        # Any cleanup logic goes here.
        return handler_input.response_builder.response


class IntentReflectorHandler(AbstractRequestHandler):
    """The intent reflector is used for interaction model testing and debugging.
    It will simply repeat the intent the user said. You can create custom handlers
    for your intents by defining them above, then also adding them to the request
    handler chain below.
    """
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_request_type("IntentRequest")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        intent_name = ask_utils.get_intent_name(handler_input)
        speak_output = "You just triggered " + intent_name + "."

        return (
            handler_input.response_builder
                .speak(speak_output)
                # .ask("add a reprompt if you want to keep the session open for the user to respond")
                .response
        )


class CatchAllExceptionHandler(AbstractExceptionHandler):
    """Generic error handling to capture any syntax or routing errors. If you receive an error
    stating the request handler chain is not found, you have not implemented a handler for
    the intent being invoked or included it in the skill builder below.
    """
    def can_handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> bool
        return True

    def handle(self, handler_input, exception):
        # type: (HandlerInput, Exception) -> Response
        logger.error(exception, exc_info=True)

        speak_output = "Sorry, I had trouble doing what you asked. Please try again."

        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )

class CajaVecinaHandler(AbstractRequestHandler):
    """Handler for the custom CajaVecina intent."""
    def can_handle(self, handler_input):
        # type: (HandlerInput) -> bool
        return ask_utils.is_intent_name("CajaVecina")(handler_input)

    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        logger.info("Llamando a caja vecina")
        logger.info(handler_input.request_envelope.context)
        # isGeoSupported = context.System.device.supportedInterfaces.Geolocation;
        # geoObject = context.Geolocation;
        speak_output = "La caja vecina más cercana es la del paso los trapenses."

        return (
            handler_input.response_builder
                .speak(speak_output)
                .ask(speak_output)
                .response
        )

# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()

sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(HelloWorldIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(CajaVecinaHandler())
sb.add_request_handler(IntentReflectorHandler())  # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers

sb.add_exception_handler(CatchAllExceptionHandler())

lambda_handler = sb.lambda_handler()
The intent is supposed to trigger when I say "caja vecina". When I type it in the web simulator it works fine, but when I use the mobile app it does not recognize the intent. The skill does show up under "My Skills" in the app. This is what my Alexa app looks like: (screenshot of the Alexa app).
What does your interaction model look like? Often, when you run into a situation like this where what you type in the simulator works but what you say does not, the problem is in how Alexa recognizes the speech. Try adding more sample utterances for that intent to your interaction model.
For example, if I had an intent with only one sample utterance, "hotdog", and I typed "hotdog" into the web simulator, Alexa would most likely do the right thing, since "hotdog" is an exact match. But if I spoke that phrase while using the app or a device, Alexa might actually hear "hot dog", with a space. In that case, adding "hot dog" as a sample utterance would help catch and correct the problem.
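As a rough sketch of what that looks like in practice, the relevant part of the interaction model JSON might resemble the snippet below. The intent name "HotdogIntent" and its utterances are purely illustrative (not from your skill); for your case you would add, under the CajaVecina intent, the spoken variants Alexa is likely to hear (for example "caja vecina" plus longer phrasings), then rebuild the model.

{
  "interactionModel": {
    "languageModel": {
      "intents": [
        {
          "name": "HotdogIntent",
          "slots": [],
          "samples": [
            "hotdog",
            "hot dog",
            "i want a hot dog"
          ]
        }
      ]
    }
  }
}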
I ran into this after somehow combining a hosted skill and a custom skill. The Code tab contained the hello world code, but the skill kept erroring even after deleting and rebuilding the model. In the end I had to delete the skill and recreate it.