#!/usr/bin/env python3
"""
Script to check if each query has at least 10 eval_results.json files
across all subfolders (numbered and supplementary) in VBench evaluation results.
"""
import json
import re
import sys
from pathlib import Path
from typing import Dict, List, Optional
from collections import defaultdict
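# Expected evaluation queries; a model folder is considered complete only when
# each of these has at least --min-required valid eval_results.json files.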
queries_to_evaluate = [
"How does the model perform in terms of aesthetics?",
"How well does the model ensure that the subject maintains a consistent appearance throughout the video?",
"How effectively does the model maintain a consistent background scene throughout the video?",
"How well does the model produce smooth and natural motion that follows the physical laws of the real world?",
"To what extent are distortions like over-exposure, noise, and blur present in the generated frames?",
"How consistently does the visual style (e.g., oil painting, black and white, watercolor) align with the specified look throughout the video?",
"How consistent are the time-based effects and camera motions throughout the video?",
"How well does the generated video demonstrate overall consistency with the input prompt?",
"How effectively does the model generate multiple distinct objects in a single scene?",
"How accurately does the model generate specific object classes as described in the text prompt?",
"To what extent does the video exhibit dynamic movement rather than being overly static?",
"How accurately do human subjects in the video perform the actions described in the text prompt?",
"How accurately do the colors of the generated objects match the specifications in the text prompt?",
"How accurately does the spatial arrangement of objects reflect the positioning and relationships described in the text prompt?",
"How accurately does the generated video represent the scene described in the text prompt?",
]
def extract_query_from_folder_name(folder_name: str) -> Optional[str]:
    """
    Extract the query from a dimension folder name.
    Expected format: date-time-query_with_underscores
    Returns None if no query can be extracted.
    """
# Look for pattern like "HH:MM:SS-query"
match = re.search(r'\d{2}:\d{2}:\d{2}-(.+)', folder_name)
if match:
query = match.group(1).replace('_', ' ')
if not query.endswith('?'):
query += '?'
return query
# For folders without timestamp, try direct extraction
# This handles cases where the folder name is just the query
if '_' in folder_name or ' ' in folder_name:
query = folder_name.replace('_', ' ')
if not query.endswith('?'):
query += '?'
return query
return None
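# Illustrative example (folder name invented for demonstration):
#   extract_query_from_folder_name(
#       "2024-01-01-12:30:45-How_does_the_model_perform_in_terms_of_aesthetics")
#   returns "How does the model perform in terms of aesthetics?"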
def count_query_occurrences(model_path: str, min_required: int = 10, expected_queries: Optional[List[str]] = None) -> Dict[str, Dict]:
    """
    Count how many eval_results.json files exist for each query across all subfolders.
    Recursively searches all directories for eval_results.json files.

    Args:
        model_path: Path to the model folder (e.g., eval_vbench_results/modelscope)
        min_required: Minimum number of eval_results.json files required per query (default: 10)
        expected_queries: Optional list of queries that must be present; any query not
            found on disk is reported as missing completely.

    Returns:
        Dictionary with per-query counts, lists of insufficient and missing queries,
        and summary statistics.
    """
model_path = Path(model_path)
if not model_path.exists():
print(f"Error: Path {model_path} does not exist!")
return {}
# Track query occurrences: query -> list of (path, folder) where it exists
query_occurrences = defaultdict(list)
# Track all unique queries found
all_queries = set()
print(f"Scanning model folder: {model_path.name}")
print("=" * 80)
# Find all eval_results.json files recursively
eval_files = list(model_path.rglob("eval_results.json"))
print(f"Found {len(eval_files)} eval_results.json files")
# Process each eval_results.json file
for eval_file in eval_files:
# Get the parent folder that contains this eval_results.json
parent_folder = eval_file.parent
# Try to extract query from the folder name
# Check if it's in a videos subfolder
if parent_folder.name == "videos":
query_folder = parent_folder.parent
else:
query_folder = parent_folder
# Extract query from folder name
query = extract_query_from_folder_name(query_folder.name)
if query:
all_queries.add(query)
# Validate the JSON file
try:
with open(eval_file, 'r') as f:
json.load(f)
# Get relative path from model folder
relative_path = eval_file.relative_to(model_path)
# Record this occurrence with the relative path
query_occurrences[query].append(str(relative_path))
            except Exception as e:
# Don't count invalid JSON files
print(f" Invalid JSON in {eval_file}: {e}")
# Analyze results
results = {
'all_queries': sorted(all_queries),
'query_counts': {},
'insufficient_queries': [],
'missing_completely': [],
'statistics': {
'total_unique_queries': len(all_queries),
'queries_with_sufficient_results': 0,
'queries_with_insufficient_results': 0,
'queries_missing_completely': 0
}
}
# If expected queries provided, check for completely missing ones
if expected_queries:
for query in expected_queries:
if query not in all_queries:
all_queries.add(query)
results['missing_completely'].append(query)
results['statistics']['queries_missing_completely'] += 1
results['query_counts'][query] = {
'count': 0,
'locations': []
}
# Check each query
for query in all_queries:
if query not in results['missing_completely']: # Skip if already marked as missing
count = len(query_occurrences[query])
results['query_counts'][query] = {
'count': count,
'locations': query_occurrences[query]
}
if count == 0:
results['missing_completely'].append(query)
results['statistics']['queries_missing_completely'] += 1
elif count < min_required:
results['insufficient_queries'].append({
'query': query,
'count': count,
'needed': min_required - count
})
results['statistics']['queries_with_insufficient_results'] += 1
else:
results['statistics']['queries_with_sufficient_results'] += 1
# Update total unique queries count
results['statistics']['total_unique_queries'] = len(all_queries)
return results
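# Shape of the dictionary returned above (values abbreviated for illustration):
#   {
#       'all_queries': [...],                 # sorted list of every query found
#       'query_counts': {query: {'count': int, 'locations': [relative paths]}},
#       'insufficient_queries': [{'query': ..., 'count': ..., 'needed': ...}],
#       'missing_completely': [...],          # queries with no valid results at all
#       'statistics': {...},                  # the four summary counters
#   }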
def save_insufficient_queries(results: Dict, output_file: str, min_required: int = 10):
"""
Save queries with insufficient eval_results.json files to a text file.
    If a query appears fewer than min_required times, it is repeated in the output
    to indicate how many more times it still needs to be evaluated.
"""
with open(output_file, 'w') as f:
# Write insufficient queries (repeated based on how many more are needed)
for item in results.get('insufficient_queries', []):
query = item['query']
needed = item['needed']
# Write the query 'needed' times
for _ in range(needed):
f.write(query + '\n')
# Write completely missing queries min_required times
for query in results.get('missing_completely', []):
for _ in range(min_required):
f.write(query + '\n')
total_lines = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
len(results.get('missing_completely', [])) * min_required
print(f"\nSaved {total_lines} query lines to {output_file}")
print(f"(Queries needing multiple evaluations are repeated)")
def generate_completeness_report(results: Dict, min_required: int = 10) -> str:
"""Generate a detailed report of query completeness."""
if not results:
return "No results to report."
report = []
report.append("=" * 80)
report.append("QUERY COMPLETENESS REPORT")
report.append(f"Minimum required eval_results.json files per query: {min_required}")
report.append("=" * 80)
stats = results['statistics']
report.append("\n📊 STATISTICS:")
report.append(f" Total unique queries found: {stats['total_unique_queries']}")
report.append(f" Queries with sufficient results (>={min_required}): {stats['queries_with_sufficient_results']}")
report.append(f" Queries with insufficient results (<{min_required}): {stats['queries_with_insufficient_results']}")
report.append(f" Queries missing completely: {stats['queries_missing_completely']}")
# Report insufficient queries
if results['insufficient_queries']:
report.append("\n" + "-" * 80)
report.append("⚠️ QUERIES WITH INSUFFICIENT RESULTS:")
report.append("-" * 80)
for item in results['insufficient_queries']:
query = item['query']
count = item['count']
needed = item['needed']
report.append(f"\n Query: {query[:80]}...")
report.append(f" Current count: {count}/{min_required} (needs {needed} more)")
locations = results['query_counts'][query]['locations']
# Extract round/folder names from paths
location_names = []
for loc in locations[:5]:
parts = loc.split('/')
if len(parts) > 0:
location_names.append(parts[0]) # Get the first folder (round/subfolder)
report.append(f" Found in: {', '.join(location_names)}")
if len(locations) > 5:
report.append(f" ... and {len(locations) - 5} more")
# Report completely missing queries
if results['missing_completely']:
report.append("\n" + "-" * 80)
report.append("❌ QUERIES MISSING COMPLETELY:")
report.append("-" * 80)
for query in results['missing_completely'][:10]: # Show first 10
report.append(f" • {query}")
if len(results['missing_completely']) > 10:
report.append(f" ... and {len(results['missing_completely']) - 10} more")
# Add query count distribution
if results.get('query_counts'):
report.append("\n" + "-" * 80)
report.append("📈 QUERY COUNT DISTRIBUTION:")
report.append("-" * 80)
count_distribution = defaultdict(list)
for query, data in results['query_counts'].items():
count = data['count']
count_distribution[count].append(query)
for count in sorted(count_distribution.keys()):
queries_at_count = count_distribution[count]
report.append(f" {count} eval_results.json: {len(queries_at_count)} queries")
if count < min_required and len(queries_at_count) <= 3:
for q in queries_at_count:
report.append(f" - {q[:70]}...")
# Summary
total_missing = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
len(results.get('missing_completely', [])) * min_required
report.append("\n" + "=" * 80)
report.append(f"SUMMARY: Need {total_missing} more evaluations to reach {min_required} per query")
report.append("=" * 80)
return "\n".join(report)
def main():
"""Main function to run the script."""
import argparse
parser = argparse.ArgumentParser(
description="Check if each query has at least N eval_results.json files across all subfolders"
)
parser.add_argument(
"path",
type=str,
help="Path to the model folder (e.g., eval_vbench_results/modelscope)"
)
parser.add_argument(
"--min-required",
type=int,
default=10,
help="Minimum number of eval_results.json files required per query (default: 10)"
)
parser.add_argument(
"--output",
type=str,
help="Save report to file"
)
parser.add_argument(
"--queries-output",
type=str,
default="queries_to_evaluate.txt",
help="Save queries that need more evaluations to a text file (repeated as needed)"
)
args = parser.parse_args()
print(f"Checking query completeness in: {args.path}")
print(f"Minimum required results per query: {args.min_required}")
print("-" * 80)
# Count query occurrences with expected queries
results = count_query_occurrences(args.path, args.min_required, queries_to_evaluate)
# Generate report
report = generate_completeness_report(results, args.min_required)
print("\n" + report)
# Save report if requested
if args.output:
output_path = Path(args.path) / args.output
with open(output_path, 'w') as f:
f.write(report)
print(f"\nReport saved to: {output_path}")
# Save queries that need more evaluations
if args.queries_output and (results.get('insufficient_queries') or results.get('missing_completely')):
output_path = Path(args.path) / args.queries_output
save_insufficient_queries(results, output_path, args.min_required)
# Return exit code
insufficient_count = len(results.get('insufficient_queries', [])) + len(results.get('missing_completely', []))
return 0 if insufficient_count == 0 else 1
if __name__ == "__main__":
    sys.exit(main())