feat: add GLM-4.5V and DashScope model support; streamline the diet analysis service's API calls

richarjiang
2025-09-04 10:16:24 +08:00
parent d34f752776
commit 97e6a0ff6d
2 changed files with 129 additions and 47 deletions

.env.glm.example

@@ -0,0 +1,24 @@
# GLM-4.5V Configuration Example
# Copy this to your .env file and update with your actual API key
# AI Vision Provider - set to 'glm' to use GLM-4.5V, 'dashscope' for Qwen (default)
AI_VISION_PROVIDER=glm
# GLM-4.5V API Configuration
GLM_API_KEY=your_glm_api_key_here
GLM_BASE_URL=https://open.bigmodel.cn/api/paas/v4
# GLM Model Names
GLM_MODEL=glm-4-flash
GLM_VISION_MODEL=glm-4v-plus
# Alternative: Use GLM-4.5V models (if available)
# GLM_MODEL=glm-4.5
# GLM_VISION_MODEL=glm-4.5v
# DashScope Configuration (fallback/default)
# Keep these for fallback or if you want to switch between providers
DASHSCOPE_API_KEY=your_dashscope_api_key_here
DASHSCOPE_BASE_URL=https://dashscope.aliyuncs.com/compatible-mode/v1
DASHSCOPE_MODEL=qwen-flash
DASHSCOPE_VISION_MODEL=qwen-vl-max
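
For reference, a minimal sketch (assuming the dotenv package; not part of this commit) of how these variables drive provider selection:

import 'dotenv/config';
// Choose the provider exactly as the service does: 'glm' or 'dashscope' (default)
const provider = process.env.AI_VISION_PROVIDER ?? 'dashscope';
const apiKey = provider === 'glm' ? process.env.GLM_API_KEY : process.env.DASHSCOPE_API_KEY;
const baseURL = provider === 'glm'
  ? (process.env.GLM_BASE_URL ?? 'https://open.bigmodel.cn/api/paas/v4')
  : (process.env.DASHSCOPE_BASE_URL ?? 'https://dashscope.aliyuncs.com/compatible-mode/v1');
// Both providers expose an OpenAI-compatible endpoint, so one client suffices:
// new OpenAI({ apiKey, baseURL })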


@@ -57,6 +57,10 @@ export interface FoodRecognitionResult {
/**
 * Diet analysis service
 * Handles diet-related AI analysis, nutrition assessment, and context building
 *
 * Supported AI model providers:
 * - GLM-4.5V (Zhipu AI) - set AI_VISION_PROVIDER=glm
 * - Qwen VL (Alibaba Cloud DashScope) - set AI_VISION_PROVIDER=dashscope (default)
 */
@Injectable()
export class DietAnalysisService {
@@ -64,21 +68,111 @@ export class DietAnalysisService {
private readonly client: OpenAI;
private readonly visionModel: string;
private readonly model: string;
private readonly apiProvider: string;
constructor(
private readonly configService: ConfigService,
private readonly dietRecordsService: DietRecordsService,
) {
const dashScopeApiKey = this.configService.get<string>('DASHSCOPE_API_KEY') || 'sk-e3ff4494c2f1463a8910d5b3d05d3143';
const baseURL = this.configService.get<string>('DASHSCOPE_BASE_URL') || 'https://dashscope.aliyuncs.com/compatible-mode/v1';
// Support both GLM-4.5V and DashScope (Qwen) models
this.apiProvider = this.configService.get<string>('AI_VISION_PROVIDER') || 'dashscope';
if (this.apiProvider === 'glm') {
// GLM-4.5V Configuration
const glmApiKey = this.configService.get<string>('GLM_API_KEY');
const glmBaseURL = this.configService.get<string>('GLM_BASE_URL') || 'https://open.bigmodel.cn/api/paas/v4';
this.client = new OpenAI({
apiKey: glmApiKey,
baseURL: glmBaseURL,
});
this.model = this.configService.get<string>('GLM_MODEL') || 'glm-4-flash';
this.visionModel = this.configService.get<string>('GLM_VISION_MODEL') || 'glm-4v-plus';
} else {
// DashScope Configuration (default)
const dashScopeApiKey = this.configService.get<string>('DASHSCOPE_API_KEY') || 'sk-e3ff4494c2f1463a8910d5b3d05d3143';
const baseURL = this.configService.get<string>('DASHSCOPE_BASE_URL') || 'https://dashscope.aliyuncs.com/compatible-mode/v1';
this.client = new OpenAI({
apiKey: dashScopeApiKey,
baseURL,
});
this.model = this.configService.get<string>('DASHSCOPE_MODEL') || 'qwen-flash';
this.visionModel = this.configService.get<string>('DASHSCOPE_VISION_MODEL') || 'qwen-vl-max';
}
}
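
A minimal sketch (hypothetical test, not part of this commit) of exercising the provider switch, stubbing ConfigService and DietRecordsService; the import path is assumed:

import { DietAnalysisService } from './diet-analysis.service';
// Stub ConfigService: only the keys the constructor reads are provided
const configStub = {
  get: (key: string) =>
    ({ AI_VISION_PROVIDER: 'glm', GLM_API_KEY: 'test-key' } as Record<string, string | undefined>)[key],
} as any;
// The constructor never touches DietRecordsService, so an empty stub suffices
const service = new DietAnalysisService(configStub, {} as any);
// With AI_VISION_PROVIDER=glm and no model overrides, the GLM defaults should apply
// (private fields read via `any` for illustration only)
console.assert((service as any).model === 'glm-4-flash');
console.assert((service as any).visionModel === 'glm-4v-plus');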
/**
 * Make a vision model API call - compatible with GLM-4.5V and DashScope
 * @param prompt Prompt text
 * @param imageUrls Array of image URLs
 * @returns API response
 */
private async makeVisionApiCall(prompt: string, imageUrls: string[]) {
const baseParams = {
model: this.visionModel,
temperature: 0.3,
response_format: { type: 'json_object' } as any,
};
if (this.apiProvider === 'glm') {
// GLM-4.5V format
return await this.client.chat.completions.create({
...baseParams,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: prompt },
...imageUrls.map((imageUrl) => ({
type: 'image_url',
image_url: { url: imageUrl }
} as any)),
] as any,
},
],
} as any);
} else {
// DashScope format (default)
return await this.client.chat.completions.create({
...baseParams,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: prompt },
...imageUrls.map((imageUrl) => ({ type: 'image_url', image_url: { url: imageUrl } as any })),
] as any,
},
],
});
}
}
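
Both branches currently build the same OpenAI-compatible request shape; for reference, an illustrative payload (values are examples only):

const examplePayload = {
  model: 'glm-4v-plus',
  temperature: 0.3,
  response_format: { type: 'json_object' },
  messages: [
    {
      role: 'user',
      content: [
        { type: 'text', text: '<food recognition prompt>' },
        { type: 'image_url', image_url: { url: 'https://example.com/meal.jpg' } },
      ],
    },
  ],
};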
/**
 * Make a text model API call - compatible with GLM-4.5 and DashScope
 * @param prompt Prompt text
 * @param userText User-provided text
 * @returns API response
 */
private async makeTextApiCall(prompt: string, userText: string) {
const baseParams = {
model: this.model,
temperature: 0.3,
response_format: { type: 'json_object' } as any,
};
return await this.client.chat.completions.create({
...baseParams,
messages: [
{
role: 'user',
content: `${prompt}\n\n用户描述：${userText}`
}
],
});
}
/**
@@ -93,20 +187,7 @@ export class DietAnalysisService {
const prompt = this.buildFoodRecognitionPrompt(suggestedMealType);
const completion = await this.client.chat.completions.create({
model: this.visionModel,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: prompt },
...imageUrls.map((imageUrl) => ({ type: 'image_url', image_url: { url: imageUrl } as any })),
] as any,
},
],
temperature: 0.3,
response_format: { type: 'json_object' } as any,
});
const completion = await this.makeVisionApiCall(prompt, imageUrls);
const rawResult = completion.choices?.[0]?.message?.content || '{}';
this.logger.log(`Food recognition result: ${rawResult}`);
@@ -136,20 +217,7 @@ export class DietAnalysisService {
const prompt = this.buildDietAnalysisPrompt(suggestedMealType);
const completion = await this.client.chat.completions.create({
model: this.visionModel,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: prompt },
...imageUrls.map((imageUrl) => ({ type: 'image_url', image_url: { url: imageUrl } as any })),
] as any,
},
],
temperature: 0.3,
response_format: { type: 'json_object' } as any,
});
const completion = await this.makeVisionApiCall(prompt, imageUrls);
const rawResult = completion.choices?.[0]?.message?.content || '{}';
this.logger.log(`Enhanced diet analysis result: ${rawResult}`);
@@ -177,17 +245,7 @@ export class DietAnalysisService {
const prompt = this.buildTextDietAnalysisPrompt(suggestedMealType);
const completion = await this.client.chat.completions.create({
model: this.model,
messages: [
{
role: 'user',
content: `${prompt}\n\n用户描述${userText}`
}
],
temperature: 0.3,
response_format: { type: 'json_object' } as any,
});
const completion = await this.makeTextApiCall(prompt, userText);
const rawResult = completion.choices?.[0]?.message?.content || '{}';
this.logger.log(`Text diet analysis result: ${rawResult}`);
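
Each call site falls back to '{}' when the model returns no content. A hedged sketch (helper name hypothetical, not in this commit) of parsing the raw string defensively, since response_format: json_object does not guarantee well-formed JSON from every provider:

function parseModelJson<T>(raw: string): T | null {
  try {
    return JSON.parse(raw) as T;
  } catch {
    return null; // caller decides how to handle malformed model output
  }
}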