# Hugging Face Space: Barking Episode Analyzer (AudioSet)
import gradio as gr
import librosa
import numpy as np
from transformers import pipeline

# Audio classification model trained on AudioSet; its label set includes
# dog/bark related classes (e.g. "Dog", "Bark").
classifier = pipeline(
    task="audio-classification",
    model="MIT/ast-finetuned-audioset-10-10-0.4593",
)
# Tunable analysis parameters.
WINDOW_SECONDS = 1.5            # length of one analysis window (seconds)
HOP_SECONDS = 0.75              # stride between consecutive windows (seconds)
BARK_THRESHOLD = 0.5            # minimum score for a window to count as barking
MAX_PAUSE_BETWEEN_BARKS = 3.0   # a pause > 3 s starts a new bark episode
def bark_score_for_segment(segment, sr):
    """Return the highest classifier score among labels containing "dog" or "bark".

    Args:
        segment: 1-D array of audio samples for one analysis window.
        sr: sampling rate of ``segment`` in Hz.

    Returns:
        float: best matching score, or 0.0 if no dog/bark label was predicted.
    """
    predictions = classifier({"array": segment, "sampling_rate": sr})
    matching_scores = [
        float(p["score"])
        for p in predictions
        if "dog" in p["label"].lower() or "bark" in p["label"].lower()
    ]
    return max(matching_scores, default=0.0)
def _bark_windows(y, sr, duration):
    """Scan the signal with overlapping windows; return detected bark windows.

    Returns a chronologically ordered list of ``(start, end, score)`` tuples
    (times in seconds) for every window whose dog/bark score reaches
    BARK_THRESHOLD. Empty or near-silent windows are skipped without
    calling the classifier.
    """
    windows = []
    t = 0.0
    while t < duration:
        start = t
        end = min(t + WINDOW_SECONDS, duration)
        segment = y[int(start * sr):int(end * sr)]
        # Skip empty / near-silent windows to save classifier calls.
        if len(segment) > 0 and np.mean(np.abs(segment)) >= 1e-4:
            score = bark_score_for_segment(segment, sr)
            if score >= BARK_THRESHOLD:
                windows.append((start, end, score))
        t += HOP_SECONDS
    return windows


def _merge_into_episodes(windows):
    """Merge bark windows into ``(start, end)`` episodes.

    Consecutive windows whose gap is at most MAX_PAUSE_BETWEEN_BARKS seconds
    belong to the same episode; a longer gap starts a new one. Requires a
    non-empty, chronologically ordered list of ``(start, end, score)`` tuples.
    """
    episodes = []
    current_start, current_end, _ = windows[0]
    for start, end, _ in windows[1:]:
        if start - current_end <= MAX_PAUSE_BETWEEN_BARKS:
            # Same episode: extend its end (windows may overlap).
            current_end = max(current_end, end)
        else:
            # Gap too long: close the running episode and start a new one.
            episodes.append((current_start, current_end))
            current_start, current_end = start, end
    episodes.append((current_start, current_end))
    return episodes


def _format_report(episodes):
    """Render the episode list as the German Markdown report shown in the UI."""
    count_episodes = len(episodes)
    total_bark_duration = sum(e_end - e_start for e_start, e_end in episodes)
    lines = []
    lines.append(f"**A: Anzahl der Bell-Ereignisse:** {count_episodes}")
    lines.append(f"**B: Gesamtdauer des Bellens:** {total_bark_duration:.1f} Sekunden")
    lines.append("")
    lines.append(f"_Regel: > {MAX_PAUSE_BETWEEN_BARKS:.0f} Sekunden Pause = neues Ereignis._")
    lines.append("\n**Details je Bell-Ereignis:**")
    for i, (e_start, e_end) in enumerate(episodes, start=1):
        dur = e_end - e_start
        lines.append(
            f"- Ereignis {i}: von {e_start:.1f}s bis {e_end:.1f}s "
            f"(Dauer: {dur:.1f}s)"
        )
    return "\n".join(lines)


def analyze_barking(audio_path):
    """Detect dog-bark episodes in an audio file.

    Args:
        audio_path: path to an audio file readable by librosa.

    Returns:
        str: German Markdown report with the episode count, total barking
        duration and per-episode details, or a message when nothing was
        detected / the file is empty.
    """
    # Load as mono at 16 kHz — the rate the AST/AudioSet model expects.
    y, sr = librosa.load(audio_path, sr=16000, mono=True)
    duration = len(y) / sr
    if duration == 0:
        return "Keine gültige Audiodatei."

    bark_windows = _bark_windows(y, sr, duration)
    if not bark_windows:
        return (
            "Es wurde kein Hundebellen mit ausreichend hoher Sicherheit erkannt.\n\n"
            f"(Schwellwert BARK_THRESHOLD = {BARK_THRESHOLD})"
        )

    episodes = _merge_into_episodes(bark_windows)
    return _format_report(episodes)
# UI help text (shown below the title in the interface).
_DESCRIPTION = (
    "Analysiert Hundebellen in einer Aufnahme.\n\n"
    "Logik:\n"
    "- Das Audio wird in überlappende Fenster geteilt.\n"
    "- In jedem Fenster wird geprüft, ob ein Label mit 'dog'/'bark' hoch genug ist.\n"
    "- Bellen-Fenster, die weniger als 3 Sekunden auseinander liegen, werden zu einem Ereignis zusammengefasst.\n"
    "- Ausgabe:\n"
    "  A) Anzahl der Bell-Ereignisse\n"
    "  B) Gesamtdauer des Bellens"
)

# Gradio UI: one audio-file input, Markdown report output.
demo = gr.Interface(
    fn=analyze_barking,
    inputs=gr.Audio(type="filepath", label="Audio hochladen (.wav, .mp3)"),
    outputs=gr.Markdown(),
    title="Barking Episode Analyzer (AudioSet)",
    description=_DESCRIPTION,
)

if __name__ == "__main__":
    demo.launch()