yagnik12 committed
Commit 54ae27f · verified · 1 Parent(s): 736c7ec

Update app.py

Files changed (1): app.py (+20 -18)
app.py CHANGED
@@ -1,21 +1,21 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+from datasets import load_dataset
 
-# 1. Load model & tokenizer
-MODEL = "microsoft/deberta-v3-small"  # you can fine-tune later on BiScope_Data
+# Load your fine-tuned model from HF Hub
+MODEL = "Yagnik12/biscope-detector"  # use fine-tuned model
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
-model = AutoModelForSequenceClassification.from_pretrained(MODEL, num_labels=2)
+model = AutoModelForSequenceClassification.from_pretrained(MODEL)
 
-# 2. Build pipeline
 detector = pipeline("text-classification", model=model, tokenizer=tokenizer, return_all_scores=True)
 
-# 3. Detection function
+# Load BiScope dataset (for demo examples)
+biscope = load_dataset("HanxiGuo/BiScope_Data", split="test[:20]")
+
 def detect_ai(text):
     results = detector(text)[0]
-    # Assuming label 0 = Human, 1 = AI
     human_score = [r["score"] for r in results if r["label"] in ["LABEL_0", "0"]][0]
     ai_score = [r["score"] for r in results if r["label"] in ["LABEL_1", "1"]][0]
-
     prediction = "🧑 Human" if human_score > ai_score else "🤖 AI"
     return {
         "Prediction": prediction,
@@ -23,15 +23,17 @@ def detect_ai(text):
         "AI Probability": round(ai_score * 100, 2)
     }
 
-# 4. Gradio UI
-demo = gr.Interface(
-    fn=detect_ai,
-    inputs=gr.Textbox(lines=5, placeholder="Enter text here..."),
-    outputs="json",
-    title="AI vs Human Text Detector",
-    description="Demo AI text detection using Hugging Face Transformers.\n"
-                "Trained/Fine-tuned models can be swapped in for better accuracy on BiScope_Data."
-)
+with gr.Blocks() as demo:
+    gr.Markdown("# AI vs Human Text Detector (BiScope)")
+
+    with gr.Row():
+        inp = gr.Textbox(lines=5, placeholder="Enter text here...")
+        out = gr.JSON()
+    btn = gr.Button("Detect")
+    btn.click(fn=detect_ai, inputs=inp, outputs=out)
+
+    gr.Markdown("### 🔎 Try BiScope Dataset Examples")
+    examples = [biscope[i]["text"] for i in range(len(biscope))]
+    gr.Examples(examples=examples, inputs=inp)
 
-if __name__ == "__main__":
-    demo.launch()
+demo.launch()
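
For context, a minimal local smoke-test sketch of what the updated script wires together. It is not part of the commit; the model id and dataset split are taken from the diff above, and it assumes the Yagnik12/biscope-detector checkpoint and the HanxiGuo/BiScope_Data test split are accessible on the Hub.

# Sketch: load the fine-tuned detector and score one BiScope example,
# mirroring the path detect_ai() takes in the updated app.py.
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

MODEL = "Yagnik12/biscope-detector"  # from the diff; assumed accessible
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
detector = pipeline("text-classification", model=model, tokenizer=tokenizer,
                    return_all_scores=True)

# One BiScope test example, same split prefix the app loads for gr.Examples.
sample = load_dataset("HanxiGuo/BiScope_Data", split="test[:1]")[0]["text"]

# detector(sample)[0] is a list of {"label", "score"} dicts, one per class,
# which is the structure detect_ai() filters on LABEL_0 / LABEL_1.
scores = {r["label"]: round(r["score"] * 100, 2) for r in detector(sample)[0]}
print(scores)  # e.g. {'LABEL_0': 97.3, 'LABEL_1': 2.7}

Running it requires gradio, transformers, and datasets installed; the Blocks UI in the diff calls the same detect_ai() path through the "Detect" button.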