为什么我的话语没有触发意图?



当我说get me in end时,它不会触发我想要触发的意图。

/* *
 * This sample demonstrates handling intents from an Alexa skill using the
 * Alexa Skills Kit SDK (v2). Please visit https://alexa.design/cookbook for
 * additional examples on implementing slots, dialog management, session
 * persistence, API calls, and more.
 * */

const Alexa = require('ask-sdk-core');
const msg = require("./localisation");
const constants = require("./constants");
/**
 * Returns the slots map of the current IntentRequest.
 * NOTE(review): assumes the incoming request is an IntentRequest — other
 * request types carry no `intent` property and this would throw.
 */
const getSlotValues = (handlerInput) => {
    const { request } = handlerInput.requestEnvelope;
    return request.intent.slots;
}
/**
 * Handles the LaunchRequest sent when the user opens the skill without a
 * specific intent: greets the user and keeps the session open via reprompt.
 */
const LaunchRequestHandler = {
    canHandle(handlerInput) {
        const requestType = Alexa.getRequestType(handlerInput.requestEnvelope);
        return requestType === 'LaunchRequest';
    },
    handle(handlerInput) {
        const greeting = msg.HELLO_MSG;
        return handlerInput.responseBuilder
            .speak(greeting)
            .reprompt(greeting)
            .getResponse();
    }
};
// If the user says "follow up", record the choice and ask the next question.
const FollowUpIntentHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(handlerInput.requestEnvelope) === 'FollowUpIntentHandler';
    },
    handle(handlerInput) {
        const { responseBuilder, attributesManager } = handlerInput;
        const slotValues = getSlotValues(handlerInput);
        // Remember that the user took the "follow up" path; other handlers
        // (e.g. CaptureUserQuestionIntentHandler) route on this attribute.
        const sessionAttributes = attributesManager.getSessionAttributes();
        sessionAttributes['follow'] = slotValues.follow.value;
        // No-op when the manager already holds this reference, but matches the
        // documented SDK pattern for persisting session attributes.
        attributesManager.setSessionAttributes(sessionAttributes);
        const speakOutput = msg.FOLLOW_UP_QUESTION_MSG;
        // NOTE(review): no reprompt here, so the session may close after this
        // response — confirm this is intended.
        return responseBuilder
            .speak(speakOutput)
            .getResponse();
    }
};
// If the user says "new" (a new question), record the choice and ask the
// next question.
const NewIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === 'IntentRequest'
            && request.intent.name === "NewIntentHandler";
    },
    handle(handlerInput) {
        // BUG FIX: the property on handlerInput is `attributesManager` (with
        // an "s"); destructuring `attributeManager` yielded undefined and this
        // handler crashed with a TypeError on getSessionAttributes().
        const { responseBuilder, attributesManager } = handlerInput;
        const sessionAttributes = attributesManager.getSessionAttributes();
        const slotValues = getSlotValues(handlerInput);
        sessionAttributes["new"] = slotValues.new.value;
        // Matches the documented SDK pattern for persisting session attributes.
        attributesManager.setSessionAttributes(sessionAttributes);

        const speakOutput = msg.NEW_QUESTION_MSG;
        return responseBuilder.speak(speakOutput).reprompt(speakOutput).getResponse();
    }
};
// Captures the user's question details (body part, time, drainage) and asks
// where the drainage is coming from.
const CaptureUserQuestionIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        const { attributesManager } = handlerInput;
        const sessionAttributes = attributesManager.getSessionAttributes();
        // BUG FIX: `&&` binds tighter than `||`, so the original condition
        //     type === "IntentRequest" && name === "..." || attr['new'] || attr['follow']
        // claimed ANY request once 'new' or 'follow' was set in the session,
        // hijacking utterances meant for other intents (the likely cause of
        // "my utterance triggers the wrong intent"). Parenthesize so only
        // IntentRequests are claimed.
        return request.type === "IntentRequest"
            && (request.intent.name === "CaptureUserQuestionIntentHandler"
                || Boolean(sessionAttributes['new'])
                || Boolean(sessionAttributes['follow']));
    },
    handle(handlerInput) {
        const { responseBuilder, attributesManager } = handlerInput;
        const slotValues = getSlotValues(handlerInput);
        const sessionAttributes = attributesManager.getSessionAttributes();

        // Optional chaining: a slot can be absent when this handler is reached
        // via the session-attribute route rather than by its own intent.
        sessionAttributes["bodyPart"] = slotValues.bodyPart?.value;
        sessionAttributes["drainage"] = slotValues.drainage?.value;
        sessionAttributes["time"] = slotValues.time?.value;
        // Matches the documented SDK pattern for persisting session attributes.
        attributesManager.setSessionAttributes(sessionAttributes);

        const speakOutput = `What is the ${sessionAttributes['drainage']} coming from? The back of the ear near the incision or the opening of your ear or ear canal?`;

        return responseBuilder
            .speak(speakOutput)
            .getResponse();
    }
};
// Handles the user's answer about where the drainage is coming from, but only
// once a 'drainage' session attribute has been recorded.
const BackOrNearIncisionIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        const sessionAttributes = handlerInput.attributesManager.getSessionAttributes();
        return request.type === "IntentRequest"
            && request.intent.name === "BackOrNearIncisionIntentHandler"
            && sessionAttributes["drainage"];
    },
    handle(handlerInput) {
        const { attributesManager, responseBuilder } = handlerInput;
        const sessionAttributes = attributesManager.getSessionAttributes();
        const slotValues = getSlotValues(handlerInput);

        // Store the reported colour and pick the reply based on whether it
        // matches the project-defined CLEAR constant.
        sessionAttributes['color'] = slotValues.color.value;
        const speakOutput = sessionAttributes['color'] === constants.CLEAR
            ? msg.IMPORANT_GET_TREATED
            : msg.SUGGEST_APPLY_MEDICINE;

        return responseBuilder
            .speak(speakOutput)
            .getResponse()
    }
};
// Handles AMAZON.YesIntent, but only when the stored drainage colour equals
// the project-defined CLEAR constant; reads back the next appointment.
const YesPleaseIntentHandler = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        const sessionAttributes = handlerInput.attributesManager.getSessionAttributes();
        return request.type === "IntentRequest"
            && request.intent.name === "AMAZON.YesIntent"
            && sessionAttributes["color"] === constants.CLEAR;
    },
    handle(handlerInput) {
        const speakOutput = msg.YOUR_NEXT_APPOINTMENT;
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .getResponse()
    }
};
// Handles the custom "UserIntentHandler" intent ("get me" sample utterance)
// and reads back the next appointment.
const UserIntentHandlers = {
    canHandle(handlerInput) {
        const { request } = handlerInput.requestEnvelope;
        return request.type === "IntentRequest"
            && request.intent.name === "UserIntentHandler";
    },
    handle(handlerInput) {
        const { responseBuilder } = handlerInput;
        // Leftover debug statement console.log(responseBuilder) removed;
        // `let` tightened to `const` since speakOutput is never reassigned.
        const speakOutput = msg.YOUR_NEXT_APPOINTMENT;
        return responseBuilder
            .speak(speakOutput)
            .getResponse()
    }
};
/**
 * Built-in AMAZON.HelpIntent: tells the user what they can say and keeps
 * the session open.
 */
const HelpIntentHandler = {
    canHandle(handlerInput) {
        const { requestEnvelope } = handlerInput;
        return Alexa.getRequestType(requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(requestEnvelope) === 'AMAZON.HelpIntent';
    },
    handle(handlerInput) {
        const speakOutput = 'You can say hello to me! How can I help?';
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .reprompt(speakOutput)
            .getResponse();
    }
};
/**
 * Built-in AMAZON.CancelIntent / AMAZON.StopIntent: says goodbye and lets
 * the session end (no reprompt).
 */
const CancelAndStopIntentHandler = {
    canHandle(handlerInput) {
        const { requestEnvelope } = handlerInput;
        if (Alexa.getRequestType(requestEnvelope) !== 'IntentRequest') {
            return false;
        }
        const intentName = Alexa.getIntentName(requestEnvelope);
        return intentName === 'AMAZON.CancelIntent' || intentName === 'AMAZON.StopIntent';
    },
    handle(handlerInput) {
        return handlerInput.responseBuilder
            .speak('Goodbye!')
            .getResponse();
    }
};
/* *
 * FallbackIntent triggers when a customer says something that doesn't map to
 * any intent in your skill. It must also be defined in the language model
 * (if the locale supports it). This handler can be safely added but will be
 * ignored in locales that do not support it yet.
 * */
const FallbackIntentHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
            && Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
    },
    handle(handlerInput) {
        // BUG FIX: the apostrophe in "don't" terminated the original
        // single-quoted string literal and broke the file; double quotes let
        // the message contain an apostrophe safely.
        const speakOutput = "Sorry, I don't know about that. Please try again.";
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .reprompt(speakOutput)
            .getResponse();
    }
};
/* *
 * SessionEndedRequest notifies that a session was ended. This handler will be
 * triggered when a currently open session is closed for one of the following
 * reasons: 1) The user says "exit" or "quit". 2) The user does not respond or
 * says something that does not match an intent defined in your voice model.
 * 3) An error occurs.
 * */
const SessionEndedRequestHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
    },
    handle(handlerInput) {
        console.log(`~~~~ Session ended: ${JSON.stringify(handlerInput.requestEnvelope)}`);
        // Any cleanup logic goes here. A SessionEndedRequest cannot be
        // answered with speech, so an empty response is returned.
        return handlerInput.responseBuilder.getResponse();
    }
};
/* *
 * The intent reflector is used for interaction model testing and debugging.
 * It simply repeats the intent the user triggered. Create custom handlers for
 * your intents by defining them above, then add them to the request handler
 * chain below — this handler must stay last, since it claims every
 * IntentRequest.
 * */
const IntentReflectorHandler = {
    canHandle(handlerInput) {
        return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
    },
    handle(handlerInput) {
        const speakOutput = `You just triggered ${Alexa.getIntentName(handlerInput.requestEnvelope)}`;
        // No reprompt: keep the session closed after reflecting the intent.
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .getResponse();
    }
};
/**
 * Generic error handling to capture any syntax or routing errors. If you
 * receive an error stating the request handler chain is not found, you have
 * not implemented a handler for the intent being invoked or included it in
 * the skill builder below.
 * */
const ErrorHandler = {
    canHandle() {
        // Catch-all: claims every error so the user never hears a raw failure.
        return true;
    },
    handle(handlerInput, error) {
        console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
        const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
        return handlerInput.responseBuilder
            .speak(speakOutput)
            .reprompt(speakOutput)
            .getResponse();
    }
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom 
* */
exports.handler = Alexa.SkillBuilders.custom()
.addRequestHandlers(
LaunchRequestHandler,
YesPleaseIntentHandler,
FollowUpIntentHandler,
BackOrNearIncisionIntentHandler,
NewIntentHandler,
CaptureUserQuestionIntentHandler,
UserIntentHandlers,



HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/hello-world/v1.2')
.lambda();
{
"interactionModel": {
"languageModel": {
"invocationName": "lee healthcare",
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "FollowUpIntentHandler",
"slots": [
{
"name": "follow",
"type": "FOLLOW"
}
],
"samples": [
"{follow} up question",
"{follow} up",
"{follow}"
]
},
{
"name": "NewIntentHandler",
"slots": [
{
"name": "new",
"type": "NEW"
}
],
"samples": [
"{new} question",
"{new}"
]
},
{
"name": "CaptureUserQuestionIntentHandler",
"slots": [
{
"name": "bodyPart",
"type": "BODYPARTS"
},
{
"name": "time",
"type": "customDate",
"samples": [
"{time}"
]
},
{
"name": "drainage",
"type": "DRAIN",
"samples": [
"{drainage}"
]
}
],
"samples": [
"I have {bodyPart} surgery {time} and have {drainage}"
]
},
{
"name": "BackOrNearIncisionIntentHandler",
"slots": [
{
"name": "color",
"type": "AMAZON.Color",
"samples": [
"{color}"
]
}
],
"samples": [
"{color}",
"near the incision",
"back of the ear near the incision"
]
},
{
"name": "AMAZON.YesIntent",
"samples": []
},
{
"name": "UserIntentHandler",
"slots": [],
"samples": [
"get me"
]
}
],
"types": [
{
"name": "FOLLOW",
"values": [
{
"name": {
"value": "follow"
}
}
]
},
{
"name": "NEW",
"values": [
{
"name": {
"value": "new"
}
}
]
},
{
"name": "BODYPARTS",
"values": [
{
"name": {
"value": "ear"
}
}
]
},
{
"name": "DRAIN",
"values": [
{
"name": {
"value": "drainage"
}
}
]
},
{
"name": "customDate",
"values": [
{
"name": {
"value": "last week"
}
}
]
},
{
"name": "AMAZON.Color",
"values": [
{
"name": {
"value": "clear"
}
}
]
}
]
},
"dialog": {
"intents": [
{
"name": "CaptureUserQuestionIntentHandler",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "bodyPart",
"type": "BODYPARTS",
"confirmationRequired": false,
"elicitationRequired": false,
"prompts": {}
},
{
"name": "time",
"type": "customDate",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.832794320406.761354353518"
}
},
{
"name": "drainage",
"type": "DRAIN",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.832794320406.873931110175"
}
}
]
},
{
"name": "BackOrNearIncisionIntentHandler",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "color",
"type": "AMAZON.Color",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.414884367204.126479337664"
}
}
]
}
],
"delegationStrategy": "ALWAYS"
},
"prompts": [
{
"id": "Slot.Validation.544061479456.1369268390684.618525999294",
"variations": [
{
"type": "PlainText",
"value": "Can i help you with the follow question or new question?"
}
]
},
{
"id": "Elicit.Slot.832794320406.761354353518",
"variations": [
{
"type": "PlainText",
"value": "When does it happen?"
}
]
},
{
"id": "Elicit.Slot.832794320406.873931110175",
"variations": [
{
"type": "PlainText",
"value": "What else do you have ?"
}
]
},
{
"id": "Elicit.Slot.414884367204.126479337664",
"variations": [
{
"type": "PlainText",
"value": "What color is drainage?"
}
]
}
]
}
}

这个问题还存在吗?当我用你的代码和交互模型创建一项新技能进行测试时,说"get me"成功触发了UserIntentHandler。你那边实际触发的是什么意图?

在Alexa开发者控制台的"构建"选项卡中,单击侧边菜单中的"意图"。你会在右上角附近看到一个"评估模型"按钮。点击它并选择"语音分析器"(Utterance Profiler),就可以看到当你说"get me"时实际触发了哪个意图。

最新更新