#!/usr/bin/env python3
"""
Debug script to analyze model output issues and test structured generation.
"""
import json
import requests
import argparse
from typing import Dict, Optional


def call_model(message: str, model_url: str = "http://0.0.0.0:12333/v1/chat/completions",
               model_name: str = "eval-agent", system: str = "", temperature: float = 0.1,
               max_tokens: int = 512) -> Optional[str]:
    """Call the model with specific parameters for debugging."""
    messages = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": message})

    payload = {
        "model": model_name,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": False
    }

    try:
        response = requests.post(model_url, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error: {e}")
        return None


def test_structured_output():
    """Test various prompts to debug structured output issues."""
    print("🔍 DEBUGGING MODEL STRUCTURED OUTPUT")
    print("=" * 60)

    # Test cases with different complexity levels
    test_cases = [
        {
            "name": "Simple Structure Test",
            "prompt": "Please respond with: <think>test thought</think> <subaspect>test aspect</subaspect> <tool>test tool</tool>",
            "system": "",
            "temperature": 0.0
        },
        {
            "name": "VBench Format Test",
            "prompt": "How well does the model generate objects?",
            "system": "You must respond in this exact format: <think>your reasoning</think> <subaspect>specific aspect</subaspect> <tool>evaluation tool</tool>",
            "temperature": 0.1
        },
        {
            "name": "Training Data Example",
            "prompt": "How accurately does the model generate specific object classes as described in the text prompt?",
            "system": """You are an expert in evaluating video generation models. You must respond in this exact format:
<think>Your detailed reasoning about what to evaluate</think> <subaspect>The specific aspect to focus on</subaspect> <tool>Object Class</tool>
Available tools: Object Class, Scene, Color, Spatial Relationship, Human Action, Dynamic Degree, Multiple Objects, Overall Consistency, Aesthetic Quality, Imaging Quality, Motion Smoothness, Subject Consistency, Background Consistency""",
            "temperature": 0.0
        }
    ]
    for i, test in enumerate(test_cases, 1):
        print(f"\n{i}. {test['name']}")
        print("-" * 40)
        print(f"Prompt: {test['prompt'][:100]}...")
        print(f"Temperature: {test['temperature']}")

        response = call_model(
            message=test['prompt'],
            system=test['system'],
            temperature=test['temperature']
        )

        if response:
            print(f"Response: {response}")

            # Analyze structure
            has_think = "<think>" in response and "</think>" in response
            has_subaspect = "<subaspect>" in response and "</subaspect>" in response
            has_tool = "<tool>" in response and "</tool>" in response
            all_tags = has_think and has_subaspect and has_tool

            print("Structure Analysis:")
            print(f"  {'✅' if has_think else '❌'} Has <think> tags: {has_think}")
            print(f"  {'✅' if has_subaspect else '❌'} Has <subaspect> tags: {has_subaspect}")
            print(f"  {'✅' if has_tool else '❌'} Has <tool> tags: {has_tool}")
            print(f"  {'✅' if all_tags else '❌'} All tags present: {all_tags}")

            # Check for common errors
            errors = []
            if "<think>" in response and "</tool>" in response and "</think>" not in response:
                errors.append("Missing </think> closing tag")
            if "Object Class</tool>" in response:
                errors.append("Tool name in wrong tag")
            opening_tags = [tag for tag in ["<think>", "<subaspect>", "<tool>"] if tag in response]
            closing_tags = [tag for tag in ["</think>", "</subaspect>", "</tool>"] if tag in response]
            if len(opening_tags) != len(closing_tags):
                errors.append("Mismatched opening/closing tags")

            if errors:
                print(f"  ❌ Errors found: {', '.join(errors)}")
        else:
            print("❌ No response received")


def test_temperature_effects():
    """Test how temperature affects structured output quality."""
    print("\n\n🌡️ TEMPERATURE EFFECTS ON STRUCTURED OUTPUT")
    print("=" * 60)

    prompt = "How accurately does the model generate specific object classes?"
    system = "Respond in format: <think>reasoning</think> <subaspect>aspect</subaspect> <tool>Object Class</tool>"
    temperatures = [0.0, 0.1, 0.3, 0.7, 1.0]

    for temp in temperatures:
        print(f"\nTemperature: {temp}")
        print("-" * 30)

        response = call_model(
            message=prompt,
            system=system,
            temperature=temp,
            max_tokens=200
        )

        if response:
            print(f"Response: {response[:150]}...")

            # Check if structure is maintained
            correct_structure = (
                "<think>" in response and "</think>" in response and
                "<subaspect>" in response and "</subaspect>" in response and
                "<tool>" in response and "</tool>" in response
            )
            print(f"Correct structure: {'✅' if correct_structure else '❌'}")
        else:
            print("❌ No response")


def analyze_training_sample():
    """Analyze a training sample to understand the expected format."""
    print("\n\n📚 TRAINING DATA ANALYSIS")
    print("=" * 60)

    # Load a training sample
    try:
        with open("data/postprocess_20250819/ea_cot_dataset_10k.json", 'r') as f:
            data = json.load(f)

        sample = data[0]  # First sample
        print("Training Sample:")
        print(f"Instruction: {sample['instruction']}")
        print(f"Expected Output: {sample['output']}")

        # Test model with the exact training example
        print("\n🧪 Testing with exact training example:")
        response = call_model(
            message=sample['instruction'],
            system=sample.get('system', ''),
            temperature=0.0
        )
        print(f"Model Response: {response}")

        # Compare
        expected = sample['output']
        if response and expected in response:
            print("✅ Model output matches training data!")
        else:
            print("❌ Model output differs from training data")

            # Detailed comparison
            if response:
                print("\nDetailed Analysis:")
                print(f"Expected think: {expected[expected.find('<think>')+7:expected.find('</think>')][:50]}...")
                print(f"Expected subaspect: {expected[expected.find('<subaspect>')+11:expected.find('</subaspect>')]}")
                print(f"Expected tool: {expected[expected.find('<tool>')+6:expected.find('</tool>')]}")
                if '<think>' in response:
                    think_content = response[response.find('<think>')+7:response.find('</think>')] if '</think>' in response else "INCOMPLETE"
                    print(f"Actual think: {think_content[:50]}...")
    except Exception as e:
        print(f"Could not load training data: {e}")


def main():
    parser = argparse.ArgumentParser(description="Debug model structured output issues")
    parser.add_argument("--model_url", default="http://0.0.0.0:12333/v1/chat/completions")
    parser.add_argument("--model_name", default="eval-agent")
    args = parser.parse_args()

    # Test connection first
    print("🔗 Testing connection...")
    response = call_model("Hello", model_url=args.model_url, model_name=args.model_name)
    if not response:
        print("❌ Cannot connect to model server")
        return
    print("✅ Connected successfully!")

    # Run all tests (note: these currently use call_model's default
    # model_url/model_name rather than the values parsed above)
    test_structured_output()
    test_temperature_effects()
    analyze_training_sample()

    print("\n\n💡 RECOMMENDATIONS:")
    print("=" * 60)
    print("1. Use temperature=0.0 or a very low temperature for structured output")
    print("2. Include explicit format instructions in the system prompt")
    print("3. Consider retraining with more structured output examples")
    print("4. Add format validation in your evaluation pipeline")
    print("5. Use constrained generation or parsing to fix malformed output")


if __name__ == "__main__":
    main()