✅ Multiple agents - 1 number, several users + DialogFlow Audio Recognition - SUPPORT 15 99856-6622 (wa.me/5515998566622)
📁 Files available for download in this lesson
👉 Video 1 Instructions
WHATICKET + AUDIO RECOGNITION
1- INSTALL WHATICKET
2- INSTALL THE LIBS
npm install @google-cloud/dialogflow
npm install dialogflow-fulfillment
npm install actions-on-google
npm install pb-util
(There is no need to install util: it is a Node.js core module and require("util") resolves to it directly. dialogflow-fulfillment is the package that exports the WebhookClient used in the code below.)
3- CONFIGURE THE JSON FILE WITH THE ACCESS CREDENTIALS
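The credentials JSON is the service-account key downloaded from the Google Cloud console (IAM & Admin > Service Accounts > Keys). Save it in the project root under the same name passed to keyFilename in the code below. It follows the standard service-account key format; every value here is a placeholder, not a real credential:
{
  "type": "service_account",
  "project_id": "zdg-9un9",
  "private_key_id": "...",
  "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
  "client_email": "your-service-account@zdg-9un9.iam.gserviceaccount.com",
  "client_id": "...",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://oauth2.googleapis.com/token"
}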
4- CONFIGURE WBOT.JS
const express = require('express');
const app2 = express();
// WebhookClient is exported by dialogflow-fulfillment, not by @google-cloud/dialogflow
const {WebhookClient} = require('dialogflow-fulfillment');
const dialogflow = require('@google-cloud/dialogflow');
const util = require("util");
const {struct} = require("pb-util");
const fs = require('fs');
// Body parsers must be registered before the route, otherwise the webhook
// request body arrives unparsed and WebhookClient cannot read it
app2.use(express.json());
app2.use(express.urlencoded({
  extended: true
}));

// DialogFlow fulfillment webhook
app2.post('/webhook', function(request, response){
  const agent = new WebhookClient({ request, response });
  let intentMap = new Map();
  // 'nomedaintencao' and 'nomedafuncao' are placeholders: map each intent
  // display name to the handler function that fulfills it
  intentMap.set('nomedaintencao', nomedafuncao);
  agent.handleRequest(intentMap);
});

function nomedafuncao(agent) {
  // fulfillment logic for the mapped intent goes here
}
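// Start the Express server so the fulfillment URL configured in DialogFlow
// can reach /webhook; port 8000 is an assumption - any free port works
app2.listen(8000, () => console.log('DialogFlow fulfillment webhook on port 8000'));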
// Sends a local audio file to DialogFlow and returns the matched intent's reply text
async function detectAudioIntent(
  projectId,
  sessionId,
  filename,
  encoding,
  sampleRateHertz,
  languageCode
) {
  // Authenticates with the service-account key configured in step 3
  const sessionClient = new dialogflow.SessionsClient({keyFilename: "zdg-9un9-0aba54d6e44c.json"});
  const sessionPath = sessionClient.projectAgentSessionPath(
    projectId,
    sessionId
  );
  // Read the audio file into memory to send it inline with the request
  const readFile = util.promisify(fs.readFile);
  const inputAudio = await readFile(filename);
  const request = {
    session: sessionPath,
    queryInput: {
      audioConfig: {
        audioEncoding: encoding,
        sampleRateHertz: sampleRateHertz,
        languageCode: languageCode,
      },
    },
    inputAudio: inputAudio,
  };
  // Send the audio and wait for the detected intent
  const [response] = await sessionClient.detectIntent(request);
  if (response.queryResult.parameters === null) {
    console.log('Intent not detected.');
    return;
  }
  else {
    console.log('Detected intent:');
    const result = response.queryResult;
    const contextClient = new dialogflow.ContextsClient();
    console.log(`  Query: ${result.queryText}`);
    console.log(`  Response: ${result.fulfillmentText}`);
    if (result.intent) {
      console.log(`  Intent: ${result.intent.displayName}`);
    } else {
      console.log('No intent matched.');
    }
    const parameters = JSON.stringify(struct.decode(result.parameters));
    console.log(`  Parameters: ${parameters}`);
    if (result.outputContexts && result.outputContexts.length) {
      console.log('  Output contexts:');
      result.outputContexts.forEach(context => {
        const contextId = contextClient.matchContextFromProjectAgentSessionContextName(
          context.name
        );
        // Decode the protobuf Struct so the context parameters print as plain JSON
        const contextParameters = JSON.stringify(
          struct.decode(context.parameters)
        );
        console.log(`    ${contextId}`);
        console.log(`    lifespan: ${context.lifespanCount}`);
        console.log(`    parameters: ${contextParameters}`);
      });
    }
    return result.fulfillmentText;
  }
}
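// 'wbot' below is the whatsapp-web.js client instance that Whaticket creates in
// wbot.js; register this listener alongside the client's existing event handlers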
wbot.on('message', async msg => {
  // 'ptt' (push-to-talk) is the whatsapp-web.js type for voice notes
  if (msg.hasMedia && msg.type === "ptt") {
    const mediaPergunta = await msg.downloadMedia();
    const base64data = mediaPergunta.data;
    // Save the voice note to disk as an OGG/Opus file named after the sender
    fs.writeFileSync('A' + msg.from + '.ogg', Buffer.from(base64data.replace('data:audio/ogg; codecs=opus;base64,', ''), 'base64'));
    // WhatsApp voice notes are OGG/Opus at 16 kHz; msg.from doubles as the DialogFlow session id
    let audioResposta = await detectAudioIntent("zdg-9un9", msg.from, 'A' + msg.from + '.ogg', 'AUDIO_ENCODING_OGG_OPUS', 16000, 'pt-BR');
    try {
      // DialogFlow answers may carry literal "\n" sequences; convert them to real line breaks
      msg.reply(audioResposta.replace(/\\n/g, '\n'));
    }
    catch {
      msg.reply('I could not understand your question. Could you repeat it?');
    }
  }
  else {
    console.log("No voice message received.");
  }
});
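To test the audio recognition without sending a WhatsApp voice note, you can call detectAudioIntent directly. A minimal sketch, assuming a local sample.ogg file recorded as OGG/Opus at 16 kHz (the file name and session id are illustrative):
detectAudioIntent('zdg-9un9', 'test-session', 'sample.ogg', 'AUDIO_ENCODING_OGG_OPUS', 16000, 'pt-BR')
  .then(reply => console.log('Agent reply:', reply))
  .catch(err => console.error('DialogFlow error:', err));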