Update smolagents_agent.py
smolagents_agent.py  (+28, -5)  CHANGED
@@ -548,18 +548,41 @@ class OptimizedSmolagentsGAIAgent:
         if not hf_token:
             print("HF_TOKEN not found. Please set it in environment variables")
             return None
+
         try:
+            # Try multiple model options for better reliability and timeout handling
+            model_options = [
+                "allenai/Olmo-3-32B-Think",
+                "microsoft/DialoGPT-small",
+                "google/flan-t5-small",
+                "meta-llama/Llama-3.2-1B-Instruct"
+            ]
+
+            for model_id in model_options:
+                try:
+                    from smolagents import InferenceClientModel
+                    model = InferenceClientModel(
+                        model_id=model_id,
+                        token=hf_token,
+                        timeout=60  # 60 second timeout
+                    )
+                    print(f"Using model: {model_id}")
+                    return model
+                except Exception as model_error:
+                    print(f"Failed to initialize {model_id}: {model_error}")
+                    continue
+
+            # Fallback: basic model without a specific model_id
             from smolagents import InferenceClientModel
-            model = InferenceClientModel(
-
-                token=hf_token
-            )
-            print("Using HuggingFace model")
+            model = InferenceClientModel(token=hf_token, timeout=30)
+            print("Using default HuggingFace model")
             return model
+
         except Exception as e:
             print(f"Error initializing HuggingFace model: {e}")
             return None
 
+
     def classify_question(self, question: str) -> Dict[str, Any]:
         """Enhanced question classification with confidence scores"""
         q_lower = question.lower()