Update app.py
Browse files
app.py
CHANGED
|
@@ -1,21 +1,21 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
|
|
|
|
| 3 |
|
| 4 |
-
#
|
| 5 |
-
MODEL = "
|
| 6 |
tokenizer = AutoTokenizer.from_pretrained(MODEL)
|
| 7 |
-
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
|
| 8 |
|
| 9 |
-
# 2. Build pipeline
|
| 10 |
detector = pipeline("text-classification", model=model, tokenizer=tokenizer, return_all_scores=True)
|
| 11 |
|
| 12 |
-
#
|
|
|
|
|
|
|
| 13 |
def detect_ai(text):
|
| 14 |
results = detector(text)[0]
|
| 15 |
-
# Assuming label 0 = Human, 1 = AI
|
| 16 |
human_score = [r["score"] for r in results if r["label"] in ["LABEL_0", "0"]][0]
|
| 17 |
ai_score = [r["score"] for r in results if r["label"] in ["LABEL_1", "1"]][0]
|
| 18 |
-
|
| 19 |
prediction = "🧑 Human" if human_score > ai_score else "🤖 AI"
|
| 20 |
return {
|
| 21 |
"Prediction": prediction,
|
|
@@ -23,15 +23,17 @@ def detect_ai(text):
|
|
| 23 |
"AI Probability": round(ai_score * 100, 2)
|
| 24 |
}
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
| 35 |
|
| 36 |
-
|
| 37 |
-
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
|
| 3 |
+
from datasets import load_dataset
|
| 4 |
|
# Hub id of the fine-tuned BiScope detector checkpoint.
MODEL = "Yagnik12/biscope-detector"  # fine-tuned model, not the base checkpoint

# Fetch the matching tokenizer/model pair (downloads from the Hub on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# Classification pipeline that reports a score for EVERY label rather than
# only the top one — detect_ai() relies on seeing both class scores.
# NOTE(review): `return_all_scores` is deprecated in recent transformers
# releases (replaced by `top_k=None`, which also changes the output nesting);
# kept as-is here to preserve the exact output shape the rest of the app reads.
detector = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,
)

# First 20 test rows of the BiScope dataset — used only as clickable
# example inputs in the UI below.
biscope = load_dataset("HanxiGuo/BiScope_Data", split="test[:20]")
|
| 14 |
+
|
| 15 |
def detect_ai(text):
    """Classify *text* as human-written or AI-generated.

    Runs the module-level ``detector`` pipeline (built with
    ``return_all_scores=True``, so every label's score is available) and
    reduces the result to a small JSON-friendly dict for the UI.

    Args:
        text: Input passage to classify.

    Returns:
        dict with keys ``"Prediction"`` (emoji-tagged verdict),
        ``"Human Probability"`` and ``"AI Probability"`` (percentages,
        rounded to 2 decimals).

    Raises:
        KeyError: if the model emits neither a recognised human nor AI label.
    """
    # With return_all_scores=True the pipeline wraps the per-label dicts in an
    # outer list (one entry per input), so [0] selects this single input.
    results = detector(text)[0]

    # Build the label->score map once instead of scanning the result list per
    # label; also avoids the original's opaque IndexError when the checkpoint
    # uses unexpected label names — we raise an explicit KeyError instead.
    scores = {r["label"]: r["score"] for r in results}

    # Label naming depends on the checkpoint config: either "LABEL_0"/"LABEL_1"
    # or bare "0"/"1". Label 0 = Human, label 1 = AI (per the original comment).
    human_score = scores.get("LABEL_0", scores.get("0"))
    ai_score = scores.get("LABEL_1", scores.get("1"))
    if human_score is None or ai_score is None:
        raise KeyError(f"Unexpected labels from detector: {sorted(scores)}")

    # Ties go to AI: strict > is required for the Human verdict.
    prediction = "🧑 Human" if human_score > ai_score else "🤖 AI"
    return {
        "Prediction": prediction,
        "Human Probability": round(human_score * 100, 2),
        "AI Probability": round(ai_score * 100, 2),
    }
|
| 25 |
|
| 26 |
# --- Gradio UI -------------------------------------------------------------
# Simple Blocks layout: a textbox + JSON output, a Detect button wired to
# detect_ai(), and clickable examples drawn from the BiScope test split.
with gr.Blocks() as demo:
    gr.Markdown("# AI vs Human Text Detector (BiScope)")

    with gr.Row():
        inp = gr.Textbox(lines=5, placeholder="Enter text here...")
        out = gr.JSON()
    btn = gr.Button("Detect")
    btn.click(fn=detect_ai, inputs=inp, outputs=out)

    gr.Markdown("### 🔎 Try BiScope Dataset Examples")
    # Pull the whole "text" column in one datasets call instead of indexing
    # row-by-row with range(len(...)) — same list of strings, one lookup.
    examples = list(biscope["text"])
    gr.Examples(examples=examples, inputs=inp)

demo.launch()
|
|
|