michon committed on
Commit
8174964
·
1 Parent(s): 55b5675

update speech again

Files changed (1)
  1. mrrrme/backend/processing/speech.py +86 -74
mrrrme/backend/processing/speech.py CHANGED
@@ -1,3 +1,17 @@
+ """MrrrMe Backend - Speech Processing Pipeline (COMPLETE)"""
+ import requests
+ import numpy as np
+ from typing import Optional, Dict
+ from ..models.loader import get_models
+ from ..session.manager import save_message
+ from ..utils.helpers import get_avatar_api_url
+ from ..config import HALLUCINATION_PHRASES, MIN_TRANSCRIPTION_LENGTH, EMOTION_MAP
+
+ AVATAR_API = get_avatar_api_url()
+
+
+ def filter_transcription(transcription: str) -> tuple:
+ """
  Filter out short or meaningless transcriptions

  Returns:
@@ -37,10 +51,10 @@ async def process_speech_end(
  7. Send to client
  """

- print(f"\n{'='*80}")
- print(f"[Speech End] USER FINISHED SPEAKING: {username}")
- print(f"{'='*80}")
- print(f"[Transcription] '{transcription}'")
+ print(f"\n{'='*80}", flush=True)
+ print(f"[Speech End] USER FINISHED SPEAKING: {username}", flush=True)
+ print(f"{'='*80}", flush=True)
+ print(f"[Transcription] '{transcription}'", flush=True)

  # Get latest model instances
  models = get_models()
@@ -54,7 +68,7 @@ async def process_speech_end(
  should_process, skip_reason = filter_transcription(transcription)

  if not should_process:
- print(f"[Filter] Skipped: {skip_reason}")
+ print(f"[Filter] Skipped: {skip_reason}", flush=True)
  return

  # Step 2: Save user message
@@ -62,57 +76,56 @@ async def process_speech_end(

  try:
  # ========== EMOTION DETECTION PIPELINE ==========
- print(f"\n[Pipeline] Starting emotion analysis pipeline...")
- print(f"{'─'*80}")
+ print(f"\n[Pipeline] Starting emotion analysis pipeline...", flush=True)
+ print(f"{'─'*80}", flush=True)

  # Step 3: Get face emotion
- print(f"[Step 1/4] FACIAL EXPRESSION ANALYSIS")
+ print(f"[Step 1/4] FACIAL EXPRESSION ANALYSIS", flush=True)
  face_emotion = face_processor.get_last_emotion() or "Neutral"
  face_confidence = face_processor.get_last_confidence() or 0.0
  face_quality = getattr(face_processor, 'get_last_quality', lambda: 0.5)()

  # Create emotion probabilities
- # ✅ FIX #2: Use module-level EMOTION_MAP (capital letters)
  face_probs = np.array([0.25, 0.25, 0.25, 0.25], dtype=np.float32)
  if face_emotion in EMOTION_MAP:
  face_idx = EMOTION_MAP[face_emotion]
  face_probs[face_idx] = face_confidence
  face_probs = face_probs / face_probs.sum()

- print(f" Result: {face_emotion}")
- print(f" Confidence: {face_confidence:.3f}")
- print(f" Quality Score: {face_quality:.3f}")
- print(f" Distribution: Neutral={face_probs[0]:.3f} | Happy={face_probs[1]:.3f} | Sad={face_probs[2]:.3f} | Angry={face_probs[3]:.3f}")
+ print(f" Result: {face_emotion}", flush=True)
+ print(f" Confidence: {face_confidence:.3f}", flush=True)
+ print(f" Quality Score: {face_quality:.3f}", flush=True)
+ print(f" Distribution: Neutral={face_probs[0]:.3f} | Happy={face_probs[1]:.3f} | Sad={face_probs[2]:.3f} | Angry={face_probs[3]:.3f}", flush=True)

  # Step 4: Get voice emotion
- print(f"\n[Step 2/4] VOICE TONE ANALYSIS")
+ print(f"\n[Step 2/4] VOICE TONE ANALYSIS", flush=True)
  voice_probs, voice_emotion = voice_worker.get_probs()
  voice_state = voice_worker.get_state()
  voice_active = voice_state.get('speech_active', False)
  voice_inferences = voice_state.get('inference_count', 0)
  voice_skipped = voice_state.get('skipped_inferences', 0)

- print(f" {'ACTIVELY PROCESSING' if voice_active else 'IDLE (no recent speech)'}")
- print(f" Result: {voice_emotion}")
- print(f" Distribution: Neutral={voice_probs[0]:.3f} | Happy={voice_probs[1]:.3f} | Sad={voice_probs[2]:.3f} | Angry={voice_probs[3]:.3f}")
- print(f" Inferences completed: {voice_inferences}")
- print(f" Skipped (silence optimization): {voice_skipped}")
+ print(f" {'ACTIVELY PROCESSING' if voice_active else 'IDLE (no recent speech)'}", flush=True)
+ print(f" Result: {voice_emotion}", flush=True)
+ print(f" Distribution: Neutral={voice_probs[0]:.3f} | Happy={voice_probs[1]:.3f} | Sad={voice_probs[2]:.3f} | Angry={voice_probs[3]:.3f}", flush=True)
+ print(f" Inferences completed: {voice_inferences}", flush=True)
+ print(f" Skipped (silence optimization): {voice_skipped}", flush=True)
  efficiency = (voice_inferences / (voice_inferences + voice_skipped) * 100) if (voice_inferences + voice_skipped) > 0 else 0
- print(f" Processing efficiency: {efficiency:.1f}%")
+ print(f" Processing efficiency: {efficiency:.1f}%", flush=True)

  # Step 5: Analyze text sentiment
- print(f"\n[Step 3/4] TEXT SENTIMENT ANALYSIS")
- print(f" Using Whisper transcription")
+ print(f"\n[Step 3/4] TEXT SENTIMENT ANALYSIS", flush=True)
+ print(f" Using Whisper transcription", flush=True)
  text_analyzer.analyze(transcription)
  text_probs, _ = text_analyzer.get_probs()
  text_emotion = ['Neutral', 'Happy', 'Sad', 'Angry'][int(np.argmax(text_probs))]

- print(f" Result: {text_emotion}")
- print(f" Distribution: Neutral={text_probs[0]:.3f} | Happy={text_probs[1]:.3f} | Sad={text_probs[2]:.3f} | Angry={text_probs[3]:.3f}")
- print(f" Text length: {len(transcription)} characters")
+ print(f" Result: {text_emotion}", flush=True)
+ print(f" Distribution: Neutral={text_probs[0]:.3f} | Happy={text_probs[1]:.3f} | Sad={text_probs[2]:.3f} | Angry={text_probs[3]:.3f}", flush=True)
+ print(f" Text length: {len(transcription)} characters", flush=True)

  # Step 6: Calculate fusion with quality-aware weights
- print(f"\n[Step 4/4] MULTI-MODAL FUSION")
+ print(f"\n[Step 4/4] MULTI-MODAL FUSION", flush=True)
  base_weights = {
  'face': fusion_engine.alpha_face,
  'voice': fusion_engine.alpha_voice,
@@ -142,12 +155,12 @@ async def process_speech_end(
  total_weight = sum(adjusted_weights.values())
  final_weights = {k: v/total_weight for k, v in adjusted_weights.items()}

- print(f" Base weights: Face={base_weights['face']:.3f} | Voice={base_weights['voice']:.3f} | Text={base_weights['text']:.3f}")
+ print(f" Base weights: Face={base_weights['face']:.3f} | Voice={base_weights['voice']:.3f} | Text={base_weights['text']:.3f}", flush=True)
  if adjustments_made:
- print(f" Adjustments:")
+ print(f" Adjustments:", flush=True)
  for adj in adjustments_made:
- print(f" - {adj}")
- print(f" Final weights: Face={final_weights['face']:.3f} | Voice={final_weights['voice']:.3f} | Text={final_weights['text']:.3f}")
+ print(f" - {adj}", flush=True)
+ print(f" Final weights: Face={final_weights['face']:.3f} | Voice={final_weights['voice']:.3f} | Text={final_weights['text']:.3f}", flush=True)

  # Calculate weighted fusion
  fused_probs = (
@@ -170,84 +183,83 @@ async def process_speech_end(
  all_same = (face_emotion == voice_emotion == text_emotion)
  has_conflict = len({face_emotion, voice_emotion, text_emotion}) == 3

- print(f"\n {'─'*76}")
- print(f" FUSION RESULTS:")
- print(f" {'─'*76}")
- print(f" Input emotions:")
- # ✅ FIX #3: Use module-level EMOTION_MAP (capital letters)
- print(f" Face: {face_emotion:7s} (confidence={face_probs[EMOTION_MAP.get(face_emotion, 0)]:.3f}, weight={final_weights['face']:.3f})")
- print(f" Voice: {voice_emotion:7s} (confidence={voice_probs[EMOTION_MAP.get(voice_emotion, 0)]:.3f}, weight={final_weights['voice']:.3f})")
- print(f" Text: {text_emotion:7s} (confidence={text_probs[EMOTION_MAP.get(text_emotion, 0)]:.3f}, weight={final_weights['text']:.3f})")
- print(f" {'─'*76}")
- print(f" FUSED EMOTION: {fused_emotion}")
- print(f" Intensity: {intensity:.3f}")
- print(f" Fused distribution: Neutral={fused_probs[0]:.3f} | Happy={fused_probs[1]:.3f} | Sad={fused_probs[2]:.3f} | Angry={fused_probs[3]:.3f}")
- print(f" {'─'*76}")
- print(f" Agreement: {agreement_count}/3 modalities ({agreement_score*100:.1f}%)")
+ print(f"\n {'─'*76}", flush=True)
+ print(f" FUSION RESULTS:", flush=True)
+ print(f" {'─'*76}", flush=True)
+ print(f" Input emotions:", flush=True)
+ print(f" Face: {face_emotion:7s} (confidence={face_probs[EMOTION_MAP.get(face_emotion, 0)]:.3f}, weight={final_weights['face']:.3f})", flush=True)
+ print(f" Voice: {voice_emotion:7s} (confidence={voice_probs[EMOTION_MAP.get(voice_emotion, 0)]:.3f}, weight={final_weights['voice']:.3f})", flush=True)
+ print(f" Text: {text_emotion:7s} (confidence={text_probs[EMOTION_MAP.get(text_emotion, 0)]:.3f}, weight={final_weights['text']:.3f})", flush=True)
+ print(f" {'─'*76}", flush=True)
+ print(f" FUSED EMOTION: {fused_emotion}", flush=True)
+ print(f" Intensity: {intensity:.3f}", flush=True)
+ print(f" Fused distribution: Neutral={fused_probs[0]:.3f} | Happy={fused_probs[1]:.3f} | Sad={fused_probs[2]:.3f} | Angry={fused_probs[3]:.3f}", flush=True)
+ print(f" {'─'*76}", flush=True)
+ print(f" Agreement: {agreement_count}/3 modalities ({agreement_score*100:.1f}%)", flush=True)

  if all_same:
- print(f" Status: Perfect agreement - all modalities aligned")
+ print(f" Status: Perfect agreement - all modalities aligned", flush=True)
  elif has_conflict:
- print(f" Status: Full conflict - weighted fusion resolved disagreement")
+ print(f" Status: Full conflict - weighted fusion resolved disagreement", flush=True)
  else:
- print(f" Status: Partial agreement - majority vote with confidence weighting")
+ print(f" Status: Partial agreement - majority vote with confidence weighting", flush=True)

- print(f" {'─'*76}")
+ print(f" {'─'*76}", flush=True)

  # ========== LLM INPUT PREPARATION ==========
- print(f"\n[LLM Input] Preparing context for language model...")
+ print(f"\n[LLM Input] Preparing context for language model...", flush=True)

  user_language = user_preferences.get("language", "en")

  context_prefix = ""
  if user_summary:
  context_prefix = f"[User context for {username}: {user_summary}]\n\n"
- print(f"[LLM Input] - User context: YES ({len(user_summary)} chars)")
+ print(f"[LLM Input] - User context: YES ({len(user_summary)} chars)", flush=True)
  else:
- print(f"[LLM Input] - User context: NO (new user)")
+ print(f"[LLM Input] - User context: NO (new user)", flush=True)

  # Add language instruction
  if user_language == "nl":
  context_prefix += "[BELANGRIJK: Antwoord ALTIJD in het Nederlands!]\n\n"
- print(f"[LLM Input] - Language: Dutch (Nederlands)")
+ print(f"[LLM Input] - Language: Dutch (Nederlands)", flush=True)
  else:
  context_prefix += "[IMPORTANT: ALWAYS respond in English!]\n\n"
- print(f"[LLM Input] - Language: English")
+ print(f"[LLM Input] - Language: English", flush=True)

  full_llm_input = context_prefix + transcription

- print(f"[LLM Input] - Fused emotion: {fused_emotion}")
- print(f"[LLM Input] - Face emotion: {face_emotion}")
- print(f"[LLM Input] - Voice emotion: {voice_emotion}")
- print(f"[LLM Input] - Intensity: {intensity:.3f}")
- print(f"[LLM Input] - User text: '{transcription}'")
- print(f"[LLM Input] - Full prompt length: {len(full_llm_input)} chars")
+ print(f"[LLM Input] - Fused emotion: {fused_emotion}", flush=True)
+ print(f"[LLM Input] - Face emotion: {face_emotion}", flush=True)
+ print(f"[LLM Input] - Voice emotion: {voice_emotion}", flush=True)
+ print(f"[LLM Input] - Intensity: {intensity:.3f}", flush=True)
+ print(f"[LLM Input] - User text: '{transcription}'", flush=True)
+ print(f"[LLM Input] - Full prompt length: {len(full_llm_input)} chars", flush=True)

  if len(context_prefix) > 50:
- print(f"[LLM Input] - Context preview: '{context_prefix[:100]}...'")
+ print(f"[LLM Input] - Context preview: '{context_prefix[:100]}...'", flush=True)

  # Generate LLM response
- print(f"\n[LLM] Generating response...")
+ print(f"\n[LLM] Generating response...", flush=True)
  response_text = llm_generator.generate_response(
  fused_emotion, face_emotion, voice_emotion,
  full_llm_input, force=True, intensity=intensity
  )

- print(f"[LLM] Response generated: '{response_text}'")
+ print(f"[LLM] Response generated: '{response_text}'", flush=True)

  # Save assistant message
  save_message(session_id, "assistant", response_text, fused_emotion)

  # ========== SEND TO AVATAR FOR TTS ==========
- print(f"\n[TTS] Sending to avatar backend...")
+ print(f"\n[TTS] Sending to avatar backend...", flush=True)

  try:
  voice_preference = user_preferences.get("voice", "female")
  language_preference = user_preferences.get("language", "en")

- print(f"[TTS] - Voice: {voice_preference}")
- print(f"[TTS] - Language: {language_preference}")
- print(f"[TTS] - Text: '{response_text}'")
+ print(f"[TTS] - Voice: {voice_preference}", flush=True)
+ print(f"[TTS] - Language: {language_preference}", flush=True)
+ print(f"[TTS] - Text: '{response_text}'", flush=True)

  avatar_response = requests.post(
  f"{AVATAR_API}/speak",
@@ -261,9 +273,9 @@ async def process_speech_end(
  avatar_response.raise_for_status()
  avatar_data = avatar_response.json()

- print(f"[TTS] Avatar TTS generated")
- print(f"[TTS] - Audio URL: {avatar_data.get('audio_url', 'N/A')}")
- print(f"[TTS] - Visemes: {len(avatar_data.get('visemes', []))} keyframes")
+ print(f"[TTS] Avatar TTS generated", flush=True)
+ print(f"[TTS] - Audio URL: {avatar_data.get('audio_url', 'N/A')}", flush=True)
+ print(f"[TTS] - Visemes: {len(avatar_data.get('visemes', []))} keyframes", flush=True)

  await websocket.send_json({
  "type": "llm_response",
@@ -274,10 +286,10 @@ async def process_speech_end(
  "visemes": avatar_data.get("visemes")
  })

- print(f"[Pipeline] Complete response sent to {username}")
+ print(f"[Pipeline] Complete response sent to {username}", flush=True)

  except requests.exceptions.ConnectionError:
- print(f"[TTS] Avatar service not available - sending text-only")
+ print(f"[TTS] Avatar service not available - sending text-only", flush=True)
  await websocket.send_json({
  "type": "llm_response",
  "text": response_text,
@@ -287,7 +299,7 @@ async def process_speech_end(
  })

  except Exception as avatar_err:
- print(f"[TTS] Avatar error: {avatar_err}")
+ print(f"[TTS] Avatar error: {avatar_err}", flush=True)
  await websocket.send_json({
  "type": "llm_response",
  "text": response_text,
@@ -297,9 +309,9 @@ async def process_speech_end(
  "text_only": True
  })

- print(f"{'='*80}\n")
+ print(f"{'='*80}\n", flush=True)

  except Exception as e:
- print(f"[Pipeline] Error in emotion processing: {e}")
+ print(f"[Pipeline] Error in emotion processing: {e}", flush=True)
  import traceback
  traceback.print_exc()
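
The fusion step logged above ("Step 4/4") reduces to renormalizing quality-adjusted modality weights and taking a weighted sum of the per-modality probability distributions over Neutral/Happy/Sad/Angry. A minimal standalone sketch of that pattern follows; the weights, probability vectors, and the halve-face-weight-on-low-quality rule are made-up illustrations, not values or logic taken from the MrrrMe codebase.

# Hypothetical, self-contained illustration of quality-aware weighted fusion.
# All numbers below are invented for the example.
import numpy as np

EMOTIONS = ['Neutral', 'Happy', 'Sad', 'Angry']

base_weights = {'face': 0.5, 'voice': 0.3, 'text': 0.2}   # hypothetical alphas
face_probs = np.array([0.10, 0.70, 0.10, 0.10])            # per-modality distributions
voice_probs = np.array([0.40, 0.30, 0.20, 0.10])
text_probs = np.array([0.25, 0.45, 0.20, 0.10])

# Hypothetical quality adjustment: trust the face less when its quality score is low.
adjusted_weights = dict(base_weights)
face_quality = 0.3
if face_quality < 0.5:
    adjusted_weights['face'] *= 0.5

# Renormalize so the adjusted weights sum to 1, as the diff above does.
total_weight = sum(adjusted_weights.values())
final_weights = {k: v / total_weight for k, v in adjusted_weights.items()}

# Weighted fusion of the three distributions, then pick the top emotion.
fused_probs = (
    final_weights['face'] * face_probs
    + final_weights['voice'] * voice_probs
    + final_weights['text'] * text_probs
)
fused_emotion = EMOTIONS[int(np.argmax(fused_probs))]
print(f"Fused: {fused_emotion} {fused_probs.round(3)}", flush=True)

Renormalizing after the adjustment keeps the weights summing to 1, so the fused vector remains a valid probability distribution.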