import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import torch

MODEL_NAME = "rahul-shrivastav/BTP-model"

# Load the tokenizer and model once at startup.
# torch_dtype=torch.float16 halves memory use; device_map="auto" requires the
# accelerate package and places the weights on GPU when one is available.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16,
    device_map="auto",
)
def generate_response(prompt):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    generation_config = GenerationConfig(
        do_sample=True,       # sample instead of greedy decoding
        top_k=50,             # restrict sampling to the 50 most likely tokens
        temperature=0.7,      # soften the distribution for more focused output
        max_new_tokens=200,   # cap the length of the completion
        pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad-token warning
    )
    # Inference only, so skip gradient tracking.
    with torch.no_grad():
        outputs = model.generate(**inputs, generation_config=generation_config)
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"response": text}
# Intended for API-style use: gr.Interface still renders a minimal UI,
# but the Space is meant to be called over the Gradio HTTP API.
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(),
    outputs="json",
    allow_flagging="never",  # hide the flag button
)
if __name__ == "__main__":
    # The HTTP API is enabled by default, and flagging is already disabled on
    # the Interface above, so neither needs a launch() argument.
    demo.launch(
        server_name="0.0.0.0",  # bind to all interfaces so Spaces can route requests in
        server_port=7860,       # the port Hugging Face Spaces expects
        # share=True,           # optional: temporary public link (unnecessary on Spaces)
    )
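
A minimal client-side sketch for calling the deployed endpoint with the gradio_client package. The Space repo id used here ("rahul-shrivastav/BTP-space") is hypothetical, since only the model id appears above; substitute the actual Space.

# client.py -- run anywhere, not inside the Space
from gradio_client import Client

client = Client("rahul-shrivastav/BTP-space")  # hypothetical Space id; replace with the real one
result = client.predict(
    "Explain what this Space does.",  # maps to the single Textbox input
    api_name="/predict",              # default endpoint name for a gr.Interface
)
print(result)  # the JSON dict returned by generate_response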