michon commited on
Commit
72ea3d9
·
1 Parent(s): 7872dc7

update backend

Browse files
mrrrme/backend/app.py CHANGED
@@ -12,7 +12,7 @@ print(f"[App] Python version: {sys.version}")
12
  print(f"[App] Working directory: {os.getcwd()}")
13
  print(f"[App] sys.path: {sys.path[:3]}")
14
 
15
- from .models.loader import load_models, models_ready
16
  from .utils.helpers import get_avatar_api_url, check_avatar_service
17
  from .auth.routes import router as auth_router
18
  from .debug.routes import router as debug_router
@@ -70,10 +70,10 @@ async def startup_event():
70
  @app.get("/")
71
  async def root():
72
  """Root endpoint"""
73
- print(f"[App] πŸ“ Root endpoint called, models_ready={models_ready}")
74
  return {
75
  "status": "running",
76
- "models_ready": models_ready,
77
  "message": "MrrrMe AI Backend v2.0",
78
  "endpoints": {
79
  "websocket": "/ws",
@@ -86,10 +86,10 @@ async def root():
86
  @app.get("/health")
87
  async def health():
88
  """Health check - responds immediately"""
89
- print(f"[App] πŸ₯ Health check called, models_ready={models_ready}")
90
  return {
91
  "status": "healthy",
92
- "models_ready": models_ready
93
  }
94
 
95
  print("="*80)
 
12
  print(f"[App] Working directory: {os.getcwd()}")
13
  print(f"[App] sys.path: {sys.path[:3]}")
14
 
15
+ from .models.loader import load_models, model_state
16
  from .utils.helpers import get_avatar_api_url, check_avatar_service
17
  from .auth.routes import router as auth_router
18
  from .debug.routes import router as debug_router
 
70
  @app.get("/")
71
  async def root():
72
  """Root endpoint"""
73
+ print(f"[App] πŸ“ Root endpoint called, models_ready={model_state.ready}")
74
  return {
75
  "status": "running",
76
+ "models_ready": model_state.ready,
77
  "message": "MrrrMe AI Backend v2.0",
78
  "endpoints": {
79
  "websocket": "/ws",
 
86
  @app.get("/health")
87
  async def health():
88
  """Health check - responds immediately"""
89
+ print(f"[App] πŸ₯ Health check called, models_ready={model_state.ready}")
90
  return {
91
  "status": "healthy",
92
+ "models_ready": model_state.ready
93
  }
94
 
95
  print("="*80)
mrrrme/backend/models/__init__.py CHANGED
@@ -1,26 +1,8 @@
1
- """MrrrMe Backend - Models Package"""
2
- from .loader import (
3
  load_models,
4
  get_models,
5
- models_ready,
6
  face_processor,
7
- text_analyzer,
8
- whisper_worker,
9
- voice_worker,
10
- llm_generator,
11
- fusion_engine,
12
- FusionEngine
13
- )
14
-
15
- __all__ = [
16
- 'load_models',
17
- 'get_models',
18
- 'models_ready',
19
- 'face_processor',
20
- 'text_analyzer',
21
- 'whisper_worker',
22
- 'voice_worker',
23
- 'llm_generator',
24
- 'fusion_engine',
25
  'FusionEngine'
26
  ]
 
1
  + from .loader import (
 
2
  load_models,
3
  get_models,
4
+ model_state,
5
  face_processor,
6
  + text_analyzer,
7
  'FusionEngine'
8
  ]
mrrrme/backend/models/loader.py CHANGED
@@ -10,7 +10,12 @@ whisper_worker = None
10
  voice_worker = None
11
  llm_generator = None
12
  fusion_engine = None
13
- models_ready = False
 
 
 
 
 
14
 
15
  class FusionEngine:
16
  """Multi-modal emotion fusion engine"""
@@ -37,7 +42,7 @@ class FusionEngine:
37
  async def load_models():
38
  """Load all AI models asynchronously (SERVER MODE)"""
39
  global face_processor, text_analyzer, whisper_worker, voice_worker
40
- global llm_generator, fusion_engine, models_ready
41
 
42
  print("\n" + "="*80)
43
  print("[Backend] πŸš€ INITIALIZING MRRRME AI MODELS (SERVER MODE)")
@@ -88,7 +93,7 @@ async def load_models():
88
  print("[Backend] ℹ️ SERVER MODE: Workers ready but not capturing")
89
  print("[Backend] ℹ️ Audio will be processed from WebSocket messages\n")
90
 
91
- models_ready = True
92
 
93
  print("="*80)
94
  print("[Backend] βœ…βœ…βœ… ALL MODELS LOADED AND READY!")
@@ -122,5 +127,5 @@ def get_models():
122
  'voice_worker': voice_worker,
123
  'llm_generator': llm_generator,
124
  'fusion_engine': fusion_engine,
125
- 'models_ready': models_ready
126
  }
 
10
  voice_worker = None
11
  llm_generator = None
12
  fusion_engine = None
13
+
14
+ class ModelState:
15
+ def __init__(self):
16
+ self.ready = False
17
+
18
+ model_state = ModelState()
19
 
20
  class FusionEngine:
21
  """Multi-modal emotion fusion engine"""
 
42
  async def load_models():
43
  """Load all AI models asynchronously (SERVER MODE)"""
44
  global face_processor, text_analyzer, whisper_worker, voice_worker
45
+ global llm_generator, fusion_engine, model_state
46
 
47
  print("\n" + "="*80)
48
  print("[Backend] πŸš€ INITIALIZING MRRRME AI MODELS (SERVER MODE)")
 
93
  print("[Backend] ℹ️ SERVER MODE: Workers ready but not capturing")
94
  print("[Backend] ℹ️ Audio will be processed from WebSocket messages\n")
95
 
96
+ model_state.ready = True
97
 
98
  print("="*80)
99
  print("[Backend] βœ…βœ…βœ… ALL MODELS LOADED AND READY!")
 
127
  'voice_worker': voice_worker,
128
  'llm_generator': llm_generator,
129
  'fusion_engine': fusion_engine,
130
+ 'models_ready': model_state.ready
131
  }
mrrrme/backend/websocket.py CHANGED
@@ -126,15 +126,15 @@ async def websocket_endpoint(websocket: WebSocket):
126
  print(f"[WebSocket] πŸ“š Loaded {len(user_history)} messages", flush=True)
127
 
128
  # βœ… FIX: Check models_ready from the module, not imported variable
129
- if not models_module.models_ready:
130
- print(f"[WebSocket] ⏳ Models still loading (models_ready={models_module.models_ready}), asking client to wait...", flush=True)
131
  await websocket.send_json({
132
  "type": "status",
133
  "message": "Models loading..."
134
  })
135
 
136
  for i in range(900):
137
- if models_module.models_ready:
138
  await websocket.send_json({
139
  "type": "status",
140
  "message": "Models ready!"
@@ -143,7 +143,7 @@ async def websocket_endpoint(websocket: WebSocket):
143
  break
144
  await asyncio.sleep(1)
145
 
146
- if not models_module.models_ready:
147
  print("[WebSocket] ❌ Models timeout", flush=True)
148
  await websocket.send_json({"type": "error", "message": "Models timeout"})
149
  await websocket.close(code=1011)
 
126
  print(f"[WebSocket] πŸ“š Loaded {len(user_history)} messages", flush=True)
127
 
128
  # βœ… FIX: Check models_ready from the module, not imported variable
129
+ if not models_module.model_state.ready:
130
+ print(f"[WebSocket] ⏳ Models still loading (models_ready={models_module.model_state.ready}), asking client to wait...", flush=True)
131
  await websocket.send_json({
132
  "type": "status",
133
  "message": "Models loading..."
134
  })
135
 
136
  for i in range(900):
137
+ if models_module.model_state.ready:
138
  await websocket.send_json({
139
  "type": "status",
140
  "message": "Models ready!"
 
143
  break
144
  await asyncio.sleep(1)
145
 
146
+ if not models_module.model_state.ready:
147
  print("[WebSocket] ❌ Models timeout", flush=True)
148
  await websocket.send_json({"type": "error", "message": "Models timeout"})
149
  await websocket.close(code=1011)