Spaces:
Sleeping
Sleeping
Evgueni Poloukarov
committed on
Commit
·
7d5b63d
1
Parent(s):
3f32d3a
debug: return debug file when all forecasts fail
Browse files
src/forecasting/chronos_inference.py
CHANGED
|
@@ -233,18 +233,33 @@ class ChronosInferencePipeline:
|
|
| 233 |
|
| 234 |
# Build DataFrame
|
| 235 |
data = {'timestamp': timestamps}
|
|
|
|
|
|
|
|
|
|
| 236 |
|
| 237 |
for border, forecast_data in results['borders'].items():
|
| 238 |
if 'error' not in forecast_data:
|
| 239 |
data[f'{border}_median'] = forecast_data['median']
|
| 240 |
data[f'{border}_q10'] = forecast_data['q10']
|
| 241 |
data[f'{border}_q90'] = forecast_data['q90']
|
| 242 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 243 |
df = pl.DataFrame(data)
|
| 244 |
df.write_parquet(output_path)
|
| 245 |
|
| 246 |
-
print(f"
|
| 247 |
-
print(f"
|
| 248 |
|
| 249 |
return output_path
|
| 250 |
|
|
@@ -289,18 +304,43 @@ def run_inference(
|
|
| 289 |
forecast_days=forecast_days
|
| 290 |
)
|
| 291 |
|
| 292 |
-
#
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
|
| 301 |
# Export to parquet
|
| 302 |
output_filename = f"forecast_{run_date}_{forecast_type}.parquet"
|
| 303 |
output_path = os.path.join(output_dir, output_filename)
|
| 304 |
pipeline.export_to_parquet(results, output_path)
|
| 305 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
return output_path
|
|
|
|
| 233 |
|
| 234 |
# Build DataFrame
|
| 235 |
data = {'timestamp': timestamps}
|
| 236 |
+
|
| 237 |
+
successful_borders = []
|
| 238 |
+
failed_borders = []
|
| 239 |
|
| 240 |
for border, forecast_data in results['borders'].items():
|
| 241 |
if 'error' not in forecast_data:
|
| 242 |
data[f'{border}_median'] = forecast_data['median']
|
| 243 |
data[f'{border}_q10'] = forecast_data['q10']
|
| 244 |
data[f'{border}_q90'] = forecast_data['q90']
|
| 245 |
+
successful_borders.append(border)
|
| 246 |
+
else:
|
| 247 |
+
failed_borders.append((border, forecast_data['error']))
|
| 248 |
+
|
| 249 |
+
# Log results
|
| 250 |
+
print(f"[EXPORT] Forecast export summary:", flush=True)
|
| 251 |
+
print(f" Successful: {len(successful_borders)} borders", flush=True)
|
| 252 |
+
print(f" Failed: {len(failed_borders)} borders", flush=True)
|
| 253 |
+
if failed_borders:
|
| 254 |
+
print(f"[EXPORT] Errors:", flush=True)
|
| 255 |
+
for border, error in failed_borders:
|
| 256 |
+
print(f" {border}: {error}", flush=True)
|
| 257 |
+
|
| 258 |
df = pl.DataFrame(data)
|
| 259 |
df.write_parquet(output_path)
|
| 260 |
|
| 261 |
+
print(f"[EXPORT] Exported to: {output_path}", flush=True)
|
| 262 |
+
print(f"[EXPORT] Shape: {df.shape}, Columns: {len(df.columns)}", flush=True)
|
| 263 |
|
| 264 |
return output_path
|
| 265 |
|
|
|
|
| 304 |
forecast_days=forecast_days
|
| 305 |
)
|
| 306 |
|
| 307 |
+
# Write debug file
|
| 308 |
+
debug_filename = f"debug_{run_date}_{forecast_type}.txt"
|
| 309 |
+
debug_path = os.path.join(output_dir, debug_filename)
|
| 310 |
+
with open(debug_path, 'w') as f:
|
| 311 |
+
f.write(f"Results summary:
|
| 312 |
+
")
|
| 313 |
+
f.write(f" Run date: {results['run_date']}
|
| 314 |
+
")
|
| 315 |
+
f.write(f" Forecast days: {results['forecast_days']}
|
| 316 |
+
")
|
| 317 |
+
f.write(f" Borders in results: {list(results['borders'].keys())}
|
| 318 |
+
|
| 319 |
+
")
|
| 320 |
+
for border, data in results['borders'].items():
|
| 321 |
+
if 'error' in data:
|
| 322 |
+
f.write(f" {border}: ERROR - {data['error']}
|
| 323 |
+
")
|
| 324 |
+
else:
|
| 325 |
+
f.write(f" {border}: OK
|
| 326 |
+
")
|
| 327 |
+
f.write(f" median count: {len(data.get('median', []))}
|
| 328 |
+
")
|
| 329 |
+
f.write(f" q10 count: {len(data.get('q10', []))}
|
| 330 |
+
")
|
| 331 |
+
f.write(f" q90 count: {len(data.get('q90', []))}
|
| 332 |
+
")
|
| 333 |
+
print(f"Debug file written to: {debug_path}", flush=True)
|
| 334 |
|
| 335 |
# Export to parquet
|
| 336 |
output_filename = f"forecast_{run_date}_{forecast_type}.parquet"
|
| 337 |
output_path = os.path.join(output_dir, output_filename)
|
| 338 |
pipeline.export_to_parquet(results, output_path)
|
| 339 |
+
|
| 340 |
+
# Check if forecast has data, if not return debug file
|
| 341 |
+
successful_count = sum(1 for data in results['borders'].values() if 'error' not in data)
|
| 342 |
+
if successful_count == 0:
|
| 343 |
+
print(f"[WARNING] No successful forecasts! Returning debug file instead.", flush=True)
|
| 344 |
+
return debug_path
|
| 345 |
+
|
| 346 |
return output_path
|