How to Build a Basic Chatbot Using TensorFlow and JavaScript
We all come across chatbots when visiting various sites. While some of them are operated by real humans, others are powered by AI.
In this article, we'll walk through building a simple AI-powered chatbot using TensorFlow and JavaScript. The chatbot will recognize user commands and respond with predefined answers.
Step-by-Step Guide
Setting Up Our Project
First, we create a new directory for our project and initialize it with npm. Make sure you have Node.js installed on your system before starting this step.
mkdir chatbot
cd chatbot
npm init -y
Install the Necessary Packages
We will use the following npm packages for this project:
npm install @tensorflow/tfjs @tensorflow-models/universal-sentence-encoder
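Note that @tensorflow/tfjs runs on a pure-JavaScript CPU backend in Node.js, which can be slow. As an optional tweak (not required for this tutorial, and depending on your package versions it may need extra setup), you can install the native Node backend and require it in place of the browser build in the script we write below:
npm install @tensorflow/tfjs-node
// Optional: in chatbot.js, use the native backend instead of @tensorflow/tfjs
// const tf = require('@tensorflow/tfjs-node');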
Create a file named intents.js to store the intents (commands). These are the categories of user input that the chatbot will recognize (e.g., greetings, product inquiries, order status).
// intents.js
const intents = {
  greeting: ["hello", "hi", "hey", "good morning", "good evening", "howdy"],
  goodbye: ["bye", "goodbye", "see you later", "farewell", "catch you later"],
  thanks: ["thank you", "thanks", "much appreciated", "thank you very much"],
  product_inquiry: ["tell me about your products", "what do you sell?", "product information", "what can I buy?", "show me your products"],
  order_status: ["where is my order?", "order status", "track my order", "order tracking", "order update"],
  shipping_info: ["shipping information", "how do you ship?", "shipping methods", "delivery options", "how long does shipping take?"],
  return_policy: ["return policy", "how to return?", "return process", "can I return?", "returns"],
  payment_methods: ["payment options", "how can I pay?", "payment methods", "available payments"],
  support_contact: ["contact support", "how to contact support?", "customer support contact", "support info", "customer service contact"],
  business_hours: ["business hours", "working hours", "when are you open?", "opening hours", "store hours"]
};

module.exports = { intents };
Create another file named responses.js to store the predefined responses. These are the answers the chatbot will give based on the recognized intent.
// responses.js
const responses = {
  greeting: "Hello! How can I help you today?",
  goodbye: "Goodbye! Have a great day!",
  thanks: "You're welcome! If you have any other questions, feel free to ask.",
  product_inquiry: "We offer a variety of products including electronics, books, clothing, and more. How can I assist you further?",
  order_status: "Please provide your order ID, and I will check the status for you.",
  shipping_info: "We offer various shipping methods including standard, express, and next-day delivery. Shipping times depend on the method chosen and your location.",
  return_policy: "Our return policy allows you to return products within 30 days of purchase. Please visit our returns page for detailed instructions.",
  payment_methods: "We accept multiple payment methods including credit/debit cards, PayPal, and bank transfers. Please choose the method that suits you best at checkout.",
  support_contact: "You can contact our support team via email at [email protected] or call us at 1-800-123-4567.",
  business_hours: "Our business hours are Monday to Friday, 9 AM to 5 PM. We are closed on weekends and public holidays."
};

module.exports = { responses };
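If you want a quick checkpoint before wiring things together, you can run a small throwaway script to confirm that every intent has a matching response (this check is purely illustrative and is not part of the final chatbot):
// check.js (optional): confirm every intent key has a response
const { intents } = require('./intents');
const { responses } = require('./responses');

for (const intent of Object.keys(intents)) {
  console.log(intent, '->', responses[intent] ? 'ok' : 'missing response');
}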
Create the main script file, chatbot.js, and load the necessary libraries and models. We load the Universal Sentence Encoder model asynchronously and start the chatbot once the model is ready.
// chatbot.js
const tf = require('@tensorflow/tfjs');
const use = require('@tensorflow-models/universal-sentence-encoder');
const { intents } = require('./intents');
const { responses } = require('./responses');
const readline = require('readline');
// Load the Universal Sentence Encoder model
let model;
use.load().then((loadedModel) => {
  model = loadedModel;
  console.log("Model loaded");
  startChatbot();
});
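The Universal Sentence Encoder weights are downloaded the first time use.load() runs, so loading can fail if the machine is offline. As an optional hardening step (a small sketch, not part of the original flow), you could attach a catch handler to the same call:
// Optional: handle model-loading failures (e.g., no network on first run)
use.load()
  .then((loadedModel) => {
    model = loadedModel;
    console.log("Model loaded");
    startChatbot();
  })
  .catch((err) => {
    console.error("Failed to load the Universal Sentence Encoder model:", err);
    process.exit(1);
  });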
Next, add a function to recognize the intent of the user's input. We embed the user input into a high-dimensional vector using the Universal Sentence Encoder, compare it against the embeddings of each intent's example phrases, and keep track of the intent with the highest similarity score.
async function recognizeIntent(userInput) {
  const userInputEmb = await model.embed([userInput]);
  let maxScore = -1;
  let recognizedIntent = null;
  for (const [intent, examples] of Object.entries(intents)) {
    // Embed the example phrases for each intent and compute similarity scores
    // between the user input embedding and the example embeddings
    const examplesEmb = await model.embed(examples);
    const scores = await tf.matMul(userInputEmb, examplesEmb, false, true).data();
    const maxExampleScore = Math.max(...scores);
    if (maxExampleScore > maxScore) {
      maxScore = maxExampleScore;
      recognizedIntent = intent;
    }
  }
  return recognizedIntent;
}
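One caveat with recognizeIntent as written: it always returns the closest intent, even for input that matches nothing well, and it re-embeds every example phrase on each call. A possible refinement (a sketch that assumes a dot-product cutoff of around 0.5 works for your phrases; tune this value by experiment) is to pre-compute the example embeddings once and reject weak matches so the fallback response in the next step can actually fire:
// Optional refinement: cache example embeddings and apply a similarity threshold.
// SIMILARITY_THRESHOLD is an assumed value; adjust it against your own test inputs.
const SIMILARITY_THRESHOLD = 0.5;
const intentEmbeddings = {};

async function prepareIntentEmbeddings() {
  for (const [intent, examples] of Object.entries(intents)) {
    intentEmbeddings[intent] = await model.embed(examples);
  }
}

async function recognizeIntentWithThreshold(userInput) {
  const userInputEmb = await model.embed([userInput]);
  let maxScore = -1;
  let recognizedIntent = null;
  for (const [intent, examplesEmb] of Object.entries(intentEmbeddings)) {
    const scores = await tf.matMul(userInputEmb, examplesEmb, false, true).data();
    const maxExampleScore = Math.max(...scores);
    if (maxExampleScore > maxScore) {
      maxScore = maxExampleScore;
      recognizedIntent = intent;
    }
  }
  // Weak matches count as "no intent" so the fallback response is used
  return maxScore >= SIMILARITY_THRESHOLD ? recognizedIntent : null;
}
If you adopt this variant, call prepareIntentEmbeddings() once after the model loads (for example, right before startChatbot()) and use recognizeIntentWithThreshold in place of recognizeIntent inside generateResponse.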
Add a function to generate responses based on the recognized intent:
async function generateResponse(userInput) {
  const intent = await recognizeIntent(userInput);
  if (intent && responses[intent]) {
    return responses[intent];
  } else {
    return "I'm sorry, I don't understand that. Can you please rephrase?";
  }
}
Finally, implement the interaction loop: set up a readline interface for the command line, prompt the user for input, and generate a response for each line they type:
function startChatbot() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });
  console.log("Welcome to the customer service chatbot! Type 'quit' to exit.");
  rl.prompt();
  rl.on('line', async (line) => {
    const userInput = line.trim();
    if (userInput.toLowerCase() === 'quit') {
      console.log("Chatbot: Goodbye!");
      rl.close();
      return;
    }
    const response = await generateResponse(userInput);
    console.log(`Chatbot: ${response}`);
    rl.prompt();
  });
}
Here is the completed code for chatbot.js:
// chatbot.js
const tf = require('@tensorflow/tfjs');
const use = require('@tensorflow-models/universal-sentence-encoder');
const { intents } = require('./intents');
const { responses } = require('./responses');
const readline = require('readline');
// Load the Universal Sentence Encoder model
let model;
use.load().then((loadedModel) => {
  model = loadedModel;
  console.log("Model loaded");
  startChatbot();
});

async function recognizeIntent(userInput) {
  const userInputEmb = await model.embed([userInput]);
  let maxScore = -1;
  let recognizedIntent = null;
  for (const [intent, examples] of Object.entries(intents)) {
    const examplesEmb = await model.embed(examples);
    const scores = await tf.matMul(userInputEmb, examplesEmb, false, true).data();
    const maxExampleScore = Math.max(...scores);
    if (maxExampleScore > maxScore) {
      maxScore = maxExampleScore;
      recognizedIntent = intent;
    }
  }
  return recognizedIntent;
}

async function generateResponse(userInput) {
  const intent = await recognizeIntent(userInput);
  if (intent && responses[intent]) {
    return responses[intent];
  } else {
    return "I'm sorry, I don't understand that. Can you please rephrase?";
  }
}

function startChatbot() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });
  console.log("Welcome to the customer service chatbot! Type 'quit' to exit.");
  rl.prompt();
  rl.on('line', async (line) => {
    const userInput = line.trim();
    if (userInput.toLowerCase() === 'quit') {
      console.log("Chatbot: Goodbye!");
      rl.close();
      return;
    }
    const response = await generateResponse(userInput);
    console.log(`Chatbot: ${response}`);
    rl.prompt();
  });
}
Run the chatbot from your project directory:
node chatbot.js
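Assuming the model maps your input to the expected intents (exact behavior can vary), a session might look something like this:
Model loaded
Welcome to the customer service chatbot! Type 'quit' to exit.
> hi
Chatbot: Hello! How can I help you today?
> where is my order?
Chatbot: Please provide your order ID, and I will check the status for you.
> quit
Chatbot: Goodbye!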
Conclusion
In this article, we've built a simple customer service chatbot using TensorFlow.js and JavaScript. While this implementation is basic, it provides a solid foundation for building more sophisticated chatbots. You can expand this project by integrating external APIs with Axios, adding more intents and responses, or deploying it on a web platform.
Happy coding!