"""Gradio app that classifies text as human-written or AI-generated.

Wraps the Hello-SimpleAI RoBERTa ChatGPT-detector checkpoint from the
Hugging Face Hub behind a simple text-in / JSON-out web interface.
"""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load model and tokenizer once at startup (downloads from the Hub on first run).
model_name = "Hello-SimpleAI/chatgpt-detector-roberta"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only; make the mode explicit


def detect_ai_text(text: str) -> dict:
    """Classify ``text`` as human-written or AI-generated.

    Args:
        text: The text to analyze. May be ``None`` or empty (Gradio can
            submit an empty textbox), in which case an error dict is returned.

    Returns:
        On success, a dict with ``human_probability`` and ``ai_probability``
        (percentages rounded to 2 decimals) and a ``prediction`` label.
        On empty input, ``{"error": ...}``.
    """
    # Guard against None and whitespace-only input from the textbox.
    if not text or not text.strip():
        return {"error": "Please provide text to analyze"}

    # Tokenize; truncate to the model's 512-token context window, so very
    # long inputs are scored on their prefix only.
    inputs = tokenizer(
        text, return_tensors="pt", truncation=True, max_length=512, padding=True
    )

    # Forward pass without gradient tracking (inference only).
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        probs = torch.softmax(logits, dim=-1)

    # NOTE(review): index 0 = human, index 1 = AI is assumed from this
    # checkpoint's label order — confirm against model.config.id2label.
    human_prob = probs[0][0].item()
    ai_prob = probs[0][1].item()

    return {
        "human_probability": round(human_prob * 100, 2),
        "ai_probability": round(ai_prob * 100, 2),
        "prediction": "AI Generated" if ai_prob > 0.5 else "Human Written",
    }


# Create Gradio interface
iface = gr.Interface(
    fn=detect_ai_text,
    inputs=gr.Textbox(lines=10, placeholder="Enter text to analyze..."),
    outputs=gr.JSON(label="Detection Results"),
    title="AI Text Detector",
    description="Detects whether text was written by a human or generated by AI",
)

if __name__ == "__main__":
    iface.launch()