Building AI-Powered Applications with LangChain, OpenAI, and NestJS
Learn how to build intelligent applications by integrating LangChain and OpenAI with NestJS, including conversation chains, embeddings, and vector stores for advanced AI capabilities.
LangChain has emerged as a powerful framework for building applications with Large Language Models (LLMs). In this comprehensive guide, we'll explore how to integrate LangChain with OpenAI in a NestJS application to create sophisticated AI-powered features.
Setting Up the Project
Start by installing the required dependencies (zod and limiter are used in the advanced sections later in this guide):

npm install @nestjs/common @nestjs/config langchain openai @pinecone-database/pinecone zod limiter
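These packages expect API credentials at runtime. The environment variable names below match the ConfigService lookups used throughout this guide; the values are placeholders:

# .env
OPENAI_API_KEY=sk-your-openai-key
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENVIRONMENT=your-pinecone-environment
PINECONE_INDEX=your-index-name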
Configuring LangChain with NestJS
// src/ai/ai.module.ts
import { Module } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { AiService } from './ai.service';

@Module({
  imports: [
    ConfigModule.forRoot({
      isGlobal: true,
    }),
  ],
  providers: [AiService],
  exports: [AiService],
})
export class AiModule {}
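For AiService to be injectable elsewhere in the application, AiModule must be registered in the root module. A minimal sketch, assuming the default AppModule generated by the Nest CLI:

// src/app.module.ts
import { Module } from '@nestjs/common';
import { AiModule } from './ai/ai.module';

@Module({
  // AiModule exports AiService, so any module importing it can inject the service
  imports: [AiModule],
})
export class AppModule {}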
Implementing the AI Service
// src/ai/ai.service.ts
// Note: the import paths below target the LangChain JS 0.0.x release line.
import { Injectable, OnModuleInit } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { OpenAI } from 'langchain/llms/openai';
import { ConversationChain } from 'langchain/chains';
import { BufferMemory } from 'langchain/memory';
import { PineconeStore } from 'langchain/vectorstores/pinecone';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { Document } from 'langchain/document';
import { PineconeClient } from '@pinecone-database/pinecone';

@Injectable()
export class AiService implements OnModuleInit {
  private model: OpenAI;
  private chain: ConversationChain;
  private vectorStore: PineconeStore;

  constructor(private configService: ConfigService) {
    this.model = new OpenAI({
      openAIApiKey: this.configService.get('OPENAI_API_KEY'),
      modelName: 'gpt-4',
      temperature: 0.7,
    });
  }

  async onModuleInit() {
    // Initialize the conversation chain with in-memory message history
    this.chain = new ConversationChain({
      llm: this.model,
      memory: new BufferMemory(),
    });

    // Initialize the vector store
    await this.initVectorStore();
  }

  private async initVectorStore() {
    const client = new PineconeClient();
    await client.init({
      apiKey: this.configService.get('PINECONE_API_KEY'),
      environment: this.configService.get('PINECONE_ENVIRONMENT'),
    });

    const pineconeIndex = client.Index(
      this.configService.get('PINECONE_INDEX'),
    );

    this.vectorStore = await PineconeStore.fromExistingIndex(
      new OpenAIEmbeddings({
        openAIApiKey: this.configService.get('OPENAI_API_KEY'),
      }),
      { pineconeIndex },
    );
  }

  async generateResponse(input: string): Promise<string> {
    const response = await this.chain.call({ input });
    return response.response;
  }

  async similaritySearch(query: string, k = 5) {
    return this.vectorStore.similaritySearch(query, k);
  }

  async addToKnowledgeBase(text: string, metadata?: Record<string, any>) {
    // The vector store already holds its own embeddings instance,
    // so no separate OpenAIEmbeddings object is needed here
    await this.vectorStore.addDocuments([
      new Document({ pageContent: text, metadata: metadata ?? {} }),
    ]);
  }
}
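addToKnowledgeBase stores each text as a single document, which works for short snippets. For longer source material you would typically chunk the text before embedding it so each chunk stays within the embedding model's context window. A sketch using LangChain's RecursiveCharacterTextSplitter, as a hypothetical addLongDocument method on the same service (chunk sizes are illustrative):

import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';

async addLongDocument(text: string, metadata?: Record<string, any>) {
  // Split long text into overlapping chunks; the overlap preserves
  // context that would otherwise be lost at chunk boundaries
  const splitter = new RecursiveCharacterTextSplitter({
    chunkSize: 1000,
    chunkOverlap: 200,
  });

  const docs = await splitter.createDocuments([text], [metadata ?? {}]);
  await this.vectorStore.addDocuments(docs);
}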
Creating the Controller
// src/ai/ai.controller.ts
import {
  Controller,
  Post,
  Body,
  Get,
  Query,
  DefaultValuePipe,
  ParseIntPipe,
} from '@nestjs/common';
import { AiService } from './ai.service';

@Controller('ai')
export class AiController {
  constructor(private readonly aiService: AiService) {}

  @Post('chat')
  async chat(@Body('message') message: string) {
    return {
      response: await this.aiService.generateResponse(message),
    };
  }

  @Get('search')
  async search(
    @Query('query') query: string,
    // Query parameters arrive as strings, so parse `limit` explicitly
    @Query('limit', new DefaultValuePipe(5), ParseIntPipe) limit: number,
  ) {
    return {
      results: await this.aiService.similaritySearch(query, limit),
    };
  }

  @Post('knowledge')
  async addKnowledge(
    @Body('text') text: string,
    @Body('metadata') metadata?: Record<string, any>,
  ) {
    await this.aiService.addToKnowledgeBase(text, metadata);
    return { success: true };
  }
}
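With the controller in place, the endpoints can be exercised from any HTTP client. A quick sketch using fetch against a local dev server (the port and payload are assumptions):

// Assumes the app listens on http://localhost:3000
const res = await fetch('http://localhost:3000/ai/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ message: 'Summarize what LangChain does.' }),
});

// The controller wraps the model output in a `response` field
const { response } = await res.json();
console.log(response);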
Implementing Advanced Features
// src/ai/ai.service.ts
import { z } from 'zod';
import { PromptTemplate } from 'langchain/prompts';
import { LLMChain } from 'langchain/chains';
import { StructuredOutputParser } from 'langchain/output_parsers';
import { ChatOpenAI } from 'langchain/chat_models/openai';
// HumanMessage is exported from langchain/schema in recent 0.0.x releases
// (earlier versions named it HumanChatMessage)
import { HumanMessage } from 'langchain/schema';
// ChatCompletionFunctions is the function-definition type from the openai v3 SDK
import { ChatCompletionFunctions } from 'openai';

@Injectable()
export class AiService {
  // ... existing code ...

  async generateStructuredResponse<T>(
    input: string,
    template: string,
    outputSchema: z.ZodType<T>,
  ): Promise<T> {
    const parser = StructuredOutputParser.fromZodSchema(outputSchema);

    const prompt = new PromptTemplate({
      // Append the parser's format instructions so the model knows
      // exactly what JSON shape to produce
      template: `${template}\n{format_instructions}`,
      inputVariables: ['input'],
      partialVariables: {
        format_instructions: parser.getFormatInstructions(),
      },
    });

    const chain = new LLMChain({
      llm: this.model,
      prompt,
      outputParser: parser,
    });

    // With an output parser attached, the chain returns the parsed
    // value under its output key (`text` by default)
    const result = await chain.call({ input });
    return result.text as T;
  }

  async generateWithFunctions(
    input: string,
    functions: ChatCompletionFunctions[],
  ) {
    // Function calling requires the chat model API; function definitions
    // are passed as call options rather than constructor options
    const model = new ChatOpenAI({
      openAIApiKey: this.configService.get('OPENAI_API_KEY'),
      modelName: 'gpt-4',
    });

    return model.call([new HumanMessage(input)], {
      functions,
      function_call: 'auto',
    });
  }
}
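As a usage sketch, generateStructuredResponse pairs with a zod schema to produce typed results. The schema, template, and injected aiService instance below are illustrative:

import { z } from 'zod';

// Hypothetical schema for extracting product details from free text
const productSchema = z.object({
  name: z.string(),
  price: z.number(),
  inStock: z.boolean(),
});

const product = await aiService.generateStructuredResponse(
  'The new SuperWidget costs $49.99 and ships immediately.',
  'Extract the product details from the following text:\n{input}',
  productSchema,
);
// product is typed as { name: string; price: number; inStock: boolean }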
Error Handling and Rate Limiting
// src/ai/ai.service.ts
import { HttpException, HttpStatus, Injectable } from '@nestjs/common';
import { RateLimiter } from 'limiter';

@Injectable()
export class AiService {
  private limiter: RateLimiter;

  constructor(private configService: ConfigService) {
    // ... existing initialization ...

    // Allow at most 50 requests per minute across the service
    this.limiter = new RateLimiter({
      tokensPerInterval: 50,
      interval: 'minute',
    });
  }

  async generateResponse(input: string): Promise<string> {
    try {
      // Blocks until a token is available, throttling outgoing calls
      await this.limiter.removeTokens(1);

      const response = await this.chain.call({ input });
      return response.response;
    } catch (error) {
      if (error instanceof Error) {
        if (error.message.includes('rate_limit')) {
          throw new HttpException(
            'Rate limit exceeded',
            HttpStatus.TOO_MANY_REQUESTS,
          );
        }
        throw new HttpException(
          'AI service error',
          HttpStatus.INTERNAL_SERVER_ERROR,
        );
      }
      throw error;
    }
  }
}
Best Practices
By following these patterns (modular configuration, a single injectable AI service, structured output parsing, and rate limiting) you can build robust AI-powered features with LangChain and OpenAI in your NestJS application. Keep an eye on API usage and costs, and add a caching layer suited to your use case, since every call to the model incurs token charges.
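As one example of such a caching strategy, a minimal in-memory cache can short-circuit repeated identical prompts. This is a sketch only: it bypasses the conversation chain's memory semantics for repeated inputs, and a production system would likely want a TTL and a shared store such as Redis:

@Injectable()
export class AiService {
  // ... existing code ...

  // Naive in-memory cache keyed by the raw prompt
  private responseCache = new Map<string, string>();

  async generateCachedResponse(input: string): Promise<string> {
    const cached = this.responseCache.get(input);
    if (cached !== undefined) {
      return cached;
    }

    const response = await this.generateResponse(input);
    this.responseCache.set(input, response);
    return response;
  }
}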