|
|
|
|
|
""" |
|
|
Script to check if each query has at least 10 eval_results.json files |
|
|
across all subfolders (numbered and supplementary) in VBench evaluation results. |
|
|
""" |
|
|
|
|
|
import json
import re
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional


# Queries that every model folder is expected to cover; any that are missing or
# under-represented are reported and written to the queries-output file.
queries_to_evaluate = [
    "How does the model perform in terms of aesthetics?",
    "How well does the model ensure that the subject maintains a consistent appearance throughout the video?",
    "How effectively does the model maintain a consistent background scene throughout the video?",
    "How well does the model produce smooth and natural motion that follows the physical laws of the real world?",
    "To what extent are distortions like over-exposure, noise, and blur present in the generated frames?",
    "How consistently does the visual style (e.g., oil painting, black and white, watercolor) align with the specified look throughout the video?",
    "How consistent are the time-based effects and camera motions throughout the video?",
    "How well does the generated video demonstrate overall consistency with the input prompt?",
    "How effectively does the model generate multiple distinct objects in a single scene?",
    "How accurately does the model generate specific object classes as described in the text prompt?",
    "To what extent does the video exhibit dynamic movement rather than being overly static?",
    "How accurately do human subjects in the video perform the actions described in the text prompt?",
    "How accurately do the colors of the generated objects match the specifications in the text prompt?",
    "How accurately does the spatial arrangement of objects reflect the positioning and relationships described in the text prompt?",
    "How accurately does the generated video represent the scene described in the text prompt?",
]


def extract_query_from_folder_name(folder_name: str) -> Optional[str]:
    """
    Extract the query from a dimension folder name.

    Expected format: date-time-query_with_underscores. Returns None if no
    query can be recovered from the folder name.
    """
    # Primary case: an HH:MM:SS timestamp followed by the underscored query.
    match = re.search(r'\d{2}:\d{2}:\d{2}-(.+)', folder_name)
    if match:
        query = match.group(1).replace('_', ' ')
        if not query.endswith('?'):
            query += '?'
        return query

    # Fallback: the folder name itself is the query (underscored or spaced).
    if '_' in folder_name or ' ' in folder_name:
        query = folder_name.replace('_', ' ')
        if not query.endswith('?'):
            query += '?'
        return query

    return None
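
# Illustrative mapping (the folder name is hypothetical, but the "HH:MM:SS-"
# tail is what the regex above keys on):
#   "2024-01-15-12:30:45-How_does_the_model_perform_in_terms_of_aesthetics"
#     -> "How does the model perform in terms of aesthetics?"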
|
|
|
|
|
|
|
|
def count_query_occurrences(model_path: str, min_required: int = 10,
                            expected_queries: Optional[List[str]] = None) -> Dict[str, Dict]:
    """
    Count how many eval_results.json files exist for each query across all
    subfolders. Recursively searches all directories for eval_results.json files.

    Args:
        model_path: Path to the model folder (e.g., eval_vbench_results/modelscope)
        min_required: Minimum number of eval_results.json files required per query (default: 10)
        expected_queries: Queries that must be covered; any that never appear are
            reported as missing completely

    Returns:
        Dictionary with query statistics and missing queries
    """
    model_path = Path(model_path)

    if not model_path.exists():
        print(f"Error: Path {model_path} does not exist!")
        return {}

    # Map each query to the relative paths of the eval_results.json files that cover it.
    query_occurrences = defaultdict(list)
    all_queries = set()

    print(f"Scanning model folder: {model_path.name}")
    print("=" * 80)

    eval_files = list(model_path.rglob("eval_results.json"))
    print(f"Found {len(eval_files)} eval_results.json files")

    for eval_file in eval_files:
        parent_folder = eval_file.parent

        # Results may live either directly in the query folder or in a
        # "videos" subfolder beneath it.
        if parent_folder.name == "videos":
            query_folder = parent_folder.parent
        else:
            query_folder = parent_folder

        query = extract_query_from_folder_name(query_folder.name)

        if query:
            all_queries.add(query)

            # Only count files that contain valid JSON.
            try:
                with open(eval_file, 'r') as f:
                    json.load(f)

                relative_path = eval_file.relative_to(model_path)
                query_occurrences[query].append(str(relative_path))

            except Exception as e:
                print(f"  Invalid JSON in {eval_file}: {e}")

    results = {
        'all_queries': sorted(all_queries),
        'query_counts': {},
        'insufficient_queries': [],
        'missing_completely': [],
        'statistics': {
            'total_unique_queries': len(all_queries),
            'queries_with_sufficient_results': 0,
            'queries_with_insufficient_results': 0,
            'queries_missing_completely': 0
        }
    }

    # Expected queries that never showed up in any folder are missing completely.
    if expected_queries:
        for query in expected_queries:
            if query not in all_queries:
                all_queries.add(query)
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
                results['query_counts'][query] = {
                    'count': 0,
                    'locations': []
                }

    # Classify every remaining query by how many valid results it has.
    for query in all_queries:
        if query not in results['missing_completely']:
            count = len(query_occurrences[query])
            results['query_counts'][query] = {
                'count': count,
                'locations': query_occurrences[query]
            }

            if count == 0:
                results['missing_completely'].append(query)
                results['statistics']['queries_missing_completely'] += 1
            elif count < min_required:
                results['insufficient_queries'].append({
                    'query': query,
                    'count': count,
                    'needed': min_required - count
                })
                results['statistics']['queries_with_insufficient_results'] += 1
            else:
                results['statistics']['queries_with_sufficient_results'] += 1

    results['statistics']['total_unique_queries'] = len(all_queries)

    return results
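
# Sketch of the returned structure (counts and paths are illustrative):
#   {
#       'all_queries': [...],
#       'query_counts': {'<query>': {'count': 7, 'locations': ['.../eval_results.json', ...]}},
#       'insufficient_queries': [{'query': '<query>', 'count': 7, 'needed': 3}],
#       'missing_completely': ['<query>', ...],
#       'statistics': {'total_unique_queries': 15, ...},
#   }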
|
|
|
|
|
|
|
|
def save_insufficient_queries(results: Dict, output_file: str, min_required: int = 10):
    """
    Save queries with insufficient eval_results.json files to a text file.

    If a query appears fewer than min_required times, it is repeated once per
    missing evaluation so the file indicates how many more runs are needed.
    """
    with open(output_file, 'w') as f:
        # Queries that have some results but fewer than min_required.
        for item in results.get('insufficient_queries', []):
            query = item['query']
            needed = item['needed']
            for _ in range(needed):
                f.write(query + '\n')

        # Queries with no results at all need the full min_required evaluations.
        for query in results.get('missing_completely', []):
            for _ in range(min_required):
                f.write(query + '\n')

    total_lines = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
                  len(results.get('missing_completely', [])) * min_required

    print(f"\nSaved {total_lines} query lines to {output_file}")
    print("(Queries needing multiple evaluations are repeated)")
|
|
|
|
|
|
|
|
def generate_completeness_report(results: Dict, min_required: int = 10) -> str:
    """Generate a detailed report of query completeness."""
    if not results:
        return "No results to report."

    report = []
    report.append("=" * 80)
    report.append("QUERY COMPLETENESS REPORT")
    report.append(f"Minimum required eval_results.json files per query: {min_required}")
    report.append("=" * 80)

    # Headline statistics.
    stats = results['statistics']
    report.append("\n📊 STATISTICS:")
    report.append(f"  Total unique queries found: {stats['total_unique_queries']}")
    report.append(f"  Queries with sufficient results (>={min_required}): {stats['queries_with_sufficient_results']}")
    report.append(f"  Queries with insufficient results (<{min_required}): {stats['queries_with_insufficient_results']}")
    report.append(f"  Queries missing completely: {stats['queries_missing_completely']}")

    # Queries that exist but have fewer than min_required results.
    if results['insufficient_queries']:
        report.append("\n" + "-" * 80)
        report.append("⚠️ QUERIES WITH INSUFFICIENT RESULTS:")
        report.append("-" * 80)
        for item in results['insufficient_queries']:
            query = item['query']
            count = item['count']
            needed = item['needed']
            report.append(f"\n  Query: {query[:80]}...")
            report.append(f"  Current count: {count}/{min_required} (needs {needed} more)")
            locations = results['query_counts'][query]['locations']

            # Show the top-level folder of up to five locations.
            location_names = [loc.split('/')[0] for loc in locations[:5]]
            report.append(f"  Found in: {', '.join(location_names)}")
            if len(locations) > 5:
                report.append(f"  ... and {len(locations) - 5} more")

    # Queries that never appeared at all.
    if results['missing_completely']:
        report.append("\n" + "-" * 80)
        report.append("❌ QUERIES MISSING COMPLETELY:")
        report.append("-" * 80)
        for query in results['missing_completely'][:10]:
            report.append(f"  • {query}")
        if len(results['missing_completely']) > 10:
            report.append(f"  ... and {len(results['missing_completely']) - 10} more")

    # Distribution of how many results each query has.
    if results.get('query_counts'):
        report.append("\n" + "-" * 80)
        report.append("📈 QUERY COUNT DISTRIBUTION:")
        report.append("-" * 80)
        count_distribution = defaultdict(list)
        for query, data in results['query_counts'].items():
            count_distribution[data['count']].append(query)

        for count in sorted(count_distribution.keys()):
            queries_at_count = count_distribution[count]
            report.append(f"  {count} eval_results.json: {len(queries_at_count)} queries")
            if count < min_required and len(queries_at_count) <= 3:
                for q in queries_at_count:
                    report.append(f"    - {q[:70]}...")

    total_missing = sum(item['needed'] for item in results.get('insufficient_queries', [])) + \
                    len(results.get('missing_completely', [])) * min_required

    report.append("\n" + "=" * 80)
    report.append(f"SUMMARY: Need {total_missing} more evaluations to reach {min_required} per query")
    report.append("=" * 80)

    return "\n".join(report)
|
|
|
|
|
|
|
|
def main():
    """Parse arguments, run the completeness check, and report the results."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Check if each query has at least N eval_results.json files across all subfolders"
    )
    parser.add_argument(
        "path",
        type=str,
        help="Path to the model folder (e.g., eval_vbench_results/modelscope)"
    )
    parser.add_argument(
        "--min-required",
        type=int,
        default=10,
        help="Minimum number of eval_results.json files required per query (default: 10)"
    )
    parser.add_argument(
        "--output",
        type=str,
        help="Save the report to this file (relative to the model folder)"
    )
    parser.add_argument(
        "--queries-output",
        type=str,
        default="queries_to_evaluate.txt",
        help="Save queries that need more evaluations to this text file (repeated as needed)"
    )

    args = parser.parse_args()

    print(f"Checking query completeness in: {args.path}")
    print(f"Minimum required results per query: {args.min_required}")
    print("-" * 80)

    results = count_query_occurrences(args.path, args.min_required, queries_to_evaluate)

    report = generate_completeness_report(results, args.min_required)
    print("\n" + report)

    # Optionally save the full report next to the results.
    if args.output:
        output_path = Path(args.path) / args.output
        with open(output_path, 'w') as f:
            f.write(report)
        print(f"\nReport saved to: {output_path}")

    # Write the queries that still need evaluations, repeated as needed.
    if args.queries_output and (results.get('insufficient_queries') or results.get('missing_completely')):
        output_path = Path(args.path) / args.queries_output
        save_insufficient_queries(results, output_path, args.min_required)

    # Exit status: 0 when every query has enough results, 1 otherwise.
    insufficient_count = len(results.get('insufficient_queries', [])) + len(results.get('missing_completely', []))
    return 0 if insufficient_count == 0 else 1
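
# The exit status makes the check easy to chain in shell scripts, e.g.
# (script name illustrative):
#   python check_query_completeness.py eval_vbench_results/modelscope && echo "all queries covered"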
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
exit(main()) |