import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Load model (CPU-friendly, no token required; trust_remote_code is needed
# because this checkpoint ships a custom model class)
model_id = "replit/replit-code-v1_5-3b"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

# Ensure it's on CPU
device = torch.device("cpu")
model.to(device)
model.eval()  # inference mode: disables dropout
def convert_python_to_r(python_code):
    # Prompt to guide the model
    prompt = f"""### Task:
Convert the following Python code to equivalent R code.

### Python code:
{python_code}

### R code:
"""
    # Tokenize input
    input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.to(device)

    # Generate; max_new_tokens caps the completion itself, so a long input
    # prompt cannot eat into the output budget (max_length would count both)
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=512,
            temperature=0.2,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode result
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Extract R code from the result (everything after the prompt header)
    if "### R code:" in result:
        result = result.split("### R code:")[-1].strip()
    return result
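
# Quick local sanity check (a sketch, not part of the Space itself): the
# sample snippet is hypothetical, and a single 3B-model generation on CPU
# can take a minute or more, so it is left commented out.
#
#   sample = "def add(a, b):\n    return a + b"
#   print(convert_python_to_r(sample))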
# Gradio interface
gr.Interface(
    fn=convert_python_to_r,
    inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
    outputs="text",
    title="Python to R Code Converter",
    description="Converts Python code to R using the Replit Code model (3B). Optimized for the Hugging Face CPU Basic tier.",
).launch()
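
# Optional: Gradio can queue overlapping requests so the 3B model handles one
# at a time on shared CPU hardware; a minimal sketch (not something this app
# configures) would be to bind the Interface to a name and chain .queue():
#
#   demo = gr.Interface(...)  # same arguments as above
#   demo.queue().launch()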