Add Chinese translations for medication management and personal settings

- Introduced new translation files for medication, personal, and weight management in Chinese.
- Updated the main index file to include the new translation modules.
- Enhanced the medication type definitions to include 'ointment'.
- Refactored workout type labels to use i18n for better localization support.
- Improved sleep quality descriptions and recommendations with i18n integration.
Author: richarjiang
Date: 2025-11-28 17:29:51 +08:00
parent fbe0c92f0f
commit bca6670390
42 changed files with 7972 additions and 6632 deletions
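The new locale files themselves are not part of the excerpt below, which covers only the voice-record screen. As a rough illustration of the pattern the commit message describes, a zh translation module and its registration might look like the following sketch; every path, key, and value here is an assumption, not the commit's actual contents:

// Hypothetical module, e.g. locales/zh/medication.ts (path and keys assumed)
export default {
  title: '用药管理', // "Medication management"
  types: {
    pill: '药片',     // "pill"
    ointment: '药膏', // "ointment", the type the commit adds
  },
};

// Hypothetical registration in the updated index file (structure assumed)
import medication from './medication';
import personal from './personal';
import weight from './weight';

export default { medication, personal, weight /* ...existing modules */ };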


@@ -3,6 +3,7 @@ import { Colors } from '@/constants/Colors';
import { useAppDispatch } from '@/hooks/redux';
import { useAuthGuard } from '@/hooks/useAuthGuard';
import { useColorScheme } from '@/hooks/useColorScheme';
+import { useI18n } from '@/hooks/useI18n';
import { useSafeAreaTop } from '@/hooks/useSafeAreaWithPadding';
import { analyzeFoodFromText } from '@/services/foodRecognition';
import { saveRecognitionResult, setError, setLoading } from '@/store/foodRecognitionSlice';
@@ -24,6 +25,7 @@ import {
type VoiceRecordState = 'idle' | 'listening' | 'processing' | 'result' | 'analyzing';
export default function VoiceRecordScreen() {
+const { t } = useI18n();
const safeAreaTop = useSafeAreaTop()
const theme = useColorScheme() ?? 'light';
const colorTokens = Colors[theme];
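The useI18n hook itself is not shown in this diff; the screen only relies on it returning a t function. A minimal sketch of such a hook, assuming it wraps react-i18next (an assumption this commit does not confirm):

// hooks/useI18n.ts (hypothetical sketch)
import { useTranslation } from 'react-i18next';

export function useI18n() {
  // Expose the translate function and the active language of the
  // underlying i18n instance.
  const { t, i18n } = useTranslation();
  return { t, locale: i18n.language };
}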
@@ -118,7 +120,7 @@ export default function VoiceRecordScreen() {
// Speech recognition callbacks - wrapped in useCallback to avoid re-creating them on every render
const onSpeechStart = useCallback(() => {
-console.log('语音开始');
+console.log('Voice started');
if (!isMountedRef.current) return;
setIsListening(true);
@@ -128,11 +130,11 @@ export default function VoiceRecordScreen() {
}, []);
const onSpeechRecognized = useCallback(() => {
-console.log('语音识别中...');
+console.log('Voice recognition in progress...');
}, []);
const onSpeechEnd = useCallback(() => {
-console.log('语音结束');
+console.log('Voice ended');
if (!isMountedRef.current) return;
setIsListening(false);
@@ -141,7 +143,7 @@ export default function VoiceRecordScreen() {
}, []);
const onSpeechError = useCallback((error: any) => {
-console.log('语音识别错误:', error);
+console.log('Voice recognition error:', error);
if (!isMountedRef.current) return;
setIsListening(false);
@@ -150,16 +152,16 @@ export default function VoiceRecordScreen() {
// Show a friendlier error message
if (error.error?.code === '7') {
-Alert.alert('提示', '没有检测到语音输入,请重试');
+Alert.alert(t('voiceRecord.alerts.noVoiceInput'), t('voiceRecord.alerts.noVoiceInput'));
} else if (error.error?.code === '2') {
-Alert.alert('提示', '网络连接异常,请检查网络后重试');
+Alert.alert(t('voiceRecord.alerts.networkError'), t('voiceRecord.alerts.networkError'));
} else {
-Alert.alert('提示', '语音识别出现问题,请重试');
+Alert.alert(t('voiceRecord.alerts.voiceError'), t('voiceRecord.alerts.voiceError'));
}
}, []);
const onSpeechResults = useCallback((event: any) => {
-console.log('语音识别结果:', event);
+console.log('Voice recognition result:', event);
if (!isMountedRef.current) return;
const text = event.value?.[0] || '';
@@ -168,7 +170,7 @@ export default function VoiceRecordScreen() {
setRecordState('result');
} else {
setRecordState('idle');
-Alert.alert('提示', '未识别到有效内容,请重新录音');
+Alert.alert(t('voiceRecord.alerts.noValidContent'), t('voiceRecord.alerts.noValidContent'));
}
stopAnimations();
}, []);
@@ -215,7 +217,7 @@ export default function VoiceRecordScreen() {
await Voice.destroy();
Voice.removeAllListeners();
} catch (error) {
-console.log('清理语音识别资源失败:', error);
+console.log('Failed to clean up voice recognition resources:', error);
}
};
cleanup();
@@ -246,22 +248,22 @@ export default function VoiceRecordScreen() {
await Voice.start('zh-CN');
} catch (error) {
-console.log('启动语音识别失败:', error);
+console.log('Failed to start voice recognition:', error);
setRecordState('idle');
setIsListening(false);
-Alert.alert('录音失败', '无法启动语音识别,请检查麦克风权限设置');
+Alert.alert(t('voiceRecord.alerts.recordingFailed'), t('voiceRecord.alerts.recordingPermissionError'));
}
};
// Stop recording
const stopRecording = async () => {
try {
-console.log('停止录音');
+console.log('Stop recording');
setIsListening(false);
await Voice.stop();
triggerHapticFeedback('impactLight');
} catch (error) {
-console.log('停止语音识别失败:', error);
+console.log('Failed to stop voice recognition:', error);
setIsListening(false);
setRecordState('idle');
}
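Note that the recognition locale stays hardcoded as Voice.start('zh-CN') even though the surrounding strings are now localized. If the recognizer should follow the active UI language at some point, a sketch along these lines could work, assuming useI18n exposes the current locale (which this diff does not show):

// Hypothetical: derive the speech-recognition locale from the i18n locale.
const { locale } = useI18n(); // 'locale' is an assumed property
await Voice.start(locale?.startsWith('zh') ? 'zh-CN' : 'en-US');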
@@ -287,7 +289,7 @@ export default function VoiceRecordScreen() {
startRecording();
}, 200);
} catch (error) {
-console.log('重新录音失败:', error);
+console.log('Failed to retry recording:', error);
setRecordState('idle');
setIsListening(false);
}
@@ -296,7 +298,7 @@ export default function VoiceRecordScreen() {
// Confirm and analyze the food text
const confirmResult = async () => {
if (!recognizedText.trim()) {
-Alert.alert('提示', '请先进行语音识别');
+Alert.alert(t('voiceRecord.alerts.pleaseRecordFirst'), t('voiceRecord.alerts.pleaseRecordFirst'));
return;
}
@@ -382,7 +384,7 @@ export default function VoiceRecordScreen() {
const errorMessage = error instanceof Error ? error.message : 'Analysis failed, please try again';
dispatch(setError(errorMessage));
-Alert.alert('分析失败', errorMessage);
+Alert.alert(t('voiceRecord.alerts.analysisFailed'), errorMessage);
}
};
@@ -401,7 +403,7 @@ export default function VoiceRecordScreen() {
router.back();
} catch (error) {
-console.log('返回时清理资源失败:', error);
+console.log('Failed to clean up resources when returning:', error);
router.back();
}
};
@@ -410,15 +412,15 @@ export default function VoiceRecordScreen() {
const getStatusText = () => {
switch (recordState) {
case 'idle':
-return '轻触麦克风开始录音';
+return t('voiceRecord.status.idle');
case 'listening':
-return '正在聆听中,请开始说话...';
+return t('voiceRecord.status.listening');
case 'processing':
-return 'AI正在处理语音内容...';
+return t('voiceRecord.status.processing');
case 'analyzing':
-return 'AI大模型深度分析营养成分中...';
+return t('voiceRecord.status.analyzing');
case 'result':
-return '语音识别完成,请确认结果';
+return t('voiceRecord.status.result');
default:
return '';
}
@@ -470,7 +472,7 @@ export default function VoiceRecordScreen() {
return (
<View style={[styles.container, { backgroundColor: colorTokens.background }]}>
<HeaderBar
title="一句话记录"
title={t('voiceRecord.title')}
onBack={handleBack}
tone={theme}
variant="elevated"
@@ -485,7 +487,7 @@ export default function VoiceRecordScreen() {
<View style={styles.topSection}>
<View style={styles.introContainer}>
<Text style={[styles.introDescription, { color: colorTokens.textSecondary }]}>
-AI将智能分析营养成分和卡路里
+{t('voiceRecord.intro.description')}
</Text>
</View>
</View>
@@ -605,7 +607,7 @@ export default function VoiceRecordScreen() {
{recordState === 'listening' && (
<Text style={[styles.hintText, { color: colorTokens.textSecondary }]}>
+{t('voiceRecord.hints.listening')}
</Text>
)}
@@ -614,18 +616,18 @@ export default function VoiceRecordScreen() {
<BlurView intensity={20} tint={theme} style={styles.examplesContainer}>
<View style={styles.examplesContent}>
<Text style={[styles.examplesTitle, { color: colorTokens.text }]}>
+{t('voiceRecord.examples.title')}
</Text>
<View style={styles.examplesList}>
-<Text style={[styles.exampleText, { color: colorTokens.textSecondary }]}>
-&ldquo;&rdquo;
-</Text>
-<Text style={[styles.exampleText, { color: colorTokens.textSecondary }]}>
-&ldquo;150&rdquo;
-</Text>
-<Text style={[styles.exampleText, { color: colorTokens.textSecondary }]}>
-&ldquo;&rdquo;
-</Text>
+{[
+t('voiceRecord.examples.items.0'),
+t('voiceRecord.examples.items.1'),
+t('voiceRecord.examples.items.2')
+].map((example: string, index: number) => (
+<Text key={index} style={[styles.exampleText, { color: colorTokens.textSecondary }]}>
+&ldquo;{example}&rdquo;
+</Text>
+))}
</View>
</View>
</BlurView>
@@ -634,7 +636,7 @@ export default function VoiceRecordScreen() {
{recordState === 'analyzing' && (
<View style={styles.analysisProgressContainer}>
<Text style={[styles.progressText, { color: colorTokens.text }]}>
-: {Math.round(analysisProgress)}%
+{t('voiceRecord.analysis.progress', { progress: Math.round(analysisProgress) })}
</Text>
<View style={styles.progressBarContainer}>
<Animated.View
@@ -650,7 +652,7 @@ export default function VoiceRecordScreen() {
/>
</View>
<Text style={[styles.analysisHint, { color: colorTokens.textSecondary }]}>
-AI正在深度分析您的食物描述...
+{t('voiceRecord.analysis.hint')}
</Text>
</View>
)}
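This progress line is the one call on this screen that passes parameters to t(). Assuming an i18next-style {{placeholder}} interpolation syntax (the actual i18n backend is not shown here), the matching entry would behave roughly like this; the English value is purely illustrative, since the original string was lost from this excerpt:

// Hypothetical entry: analysis: { progress: 'Analysis progress: {{progress}}%' }
t('voiceRecord.analysis.progress', { progress: Math.round(42.4) });
// renders as 'Analysis progress: 42%'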
@@ -662,7 +664,7 @@ export default function VoiceRecordScreen() {
<BlurView intensity={20} tint={theme} style={styles.resultContainer}>
<View style={styles.resultContent}>
<Text style={[styles.resultLabel, { color: colorTokens.textSecondary }]}>
-:
+{t('voiceRecord.result.label')}
</Text>
<Text style={[styles.resultText, { color: colorTokens.text }]}>
{recognizedText}
@@ -675,7 +677,7 @@ export default function VoiceRecordScreen() {
onPress={retryRecording}
>
<Ionicons name="refresh" size={16} color="#7B68EE" />
-<Text style={styles.retryButtonText}></Text>
+<Text style={styles.retryButtonText}>{t('voiceRecord.actions.retry')}</Text>
</TouchableOpacity>
<TouchableOpacity
@@ -683,7 +685,7 @@ export default function VoiceRecordScreen() {
onPress={confirmResult}
>
<Ionicons name="checkmark" size={16} color="white" />
-<Text style={styles.confirmButtonText}>使</Text>
+<Text style={styles.confirmButtonText}>{t('voiceRecord.actions.confirm')}</Text>
</TouchableOpacity>
</View>
)}
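Taken together, the diff implies a voiceRecord namespace with the keys below. As a reference, here is a minimal sketch of what an English locale module for it might look like; the structure is inferred from the key paths, and the values are translations of the Chinese literals removed above. Entries whose original strings were lost from this excerpt are left as placeholders rather than invented:

// Hypothetical locales/en/voiceRecord.ts, reconstructed from the keys in the diff.
export default {
  title: 'One-sentence logging', // 一句话记录
  intro: {
    description: 'AI will analyze the nutrition and calories for you', // AI将智能分析营养成分和卡路里
  },
  status: {
    idle: 'Tap the microphone to start recording',            // 轻触麦克风开始录音
    listening: 'Listening, please start speaking...',         // 正在聆听中,请开始说话...
    processing: 'AI is processing your speech...',            // AI正在处理语音内容...
    analyzing: 'The AI model is analyzing the nutrition in depth...', // AI大模型深度分析营养成分中...
    result: 'Recognition complete, please confirm the result',        // 语音识别完成,请确认结果
  },
  alerts: {
    noVoiceInput: 'No voice input detected, please try again',
    networkError: 'Network connection error, please check your network and try again',
    voiceError: 'Voice recognition ran into a problem, please try again',
    recordingFailed: 'Recording failed',
    recordingPermissionError: 'Could not start voice recognition, please check microphone permissions',
    pleaseRecordFirst: 'Please record your voice first',
    noValidContent: 'No valid content recognized, please record again',
    analysisFailed: 'Analysis failed',
  },
  hints: {
    listening: '...', // original string not visible in this excerpt
  },
  examples: {
    title: '...',                 // original string not visible in this excerpt
    items: ['...', '...', '...'], // originals not visible in this excerpt
  },
  analysis: {
    progress: 'Analysis progress: {{progress}}%', // placeholder syntax assumed, original prefix lost
    hint: 'AI is doing a deep analysis of your food description...', // AI正在深度分析您的食物描述...
  },
  result: {
    label: '...', // original string not visible in this excerpt
  },
  actions: {
    retry: '...',   // original string not visible in this excerpt
    confirm: '...', // original string not visible in this excerpt
  },
};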