akhaliq HF Staff commited on
Commit
bac1c21
·
verified ·
1 Parent(s): f796e9b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -23
app.py CHANGED
@@ -191,12 +191,20 @@ def chat_function_gpu(message, history):
191
  # Decode and return response
192
  decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
193
 
194
- # Return in tuple format for Gradio chatbot
195
- return history + [[message, decoded_output]]
 
 
 
 
196
 
197
  except Exception as e:
198
  error_msg = f"Error processing your request: {str(e)}"
199
- return history + [[message, error_msg]]
 
 
 
 
200
 
201
  # Fallback CPU function for when GPU is not available
202
  def chat_function_cpu(message, history):
@@ -241,12 +249,20 @@ def chat_function_cpu(message, history):
241
  # Decode and return response
242
  decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
243
 
244
- # Return in tuple format for Gradio chatbot
245
- return history + [[message, decoded_output]]
 
 
 
 
246
 
247
  except Exception as e:
248
  error_msg = f"Error processing your request: {str(e)}"
249
- return history + [[message, error_msg]]
 
 
 
 
250
 
251
  # Create custom theme optimized for ZeroGPU
252
  custom_theme = gr.themes.Soft(
@@ -264,7 +280,7 @@ custom_theme = gr.themes.Soft(
264
  )
265
 
266
  # Create Gradio interface with ZeroGPU support - Gradio 6 syntax
267
- with gr.Blocks(fill_height=True) as demo:
268
  gr.Markdown("""
269
  # 🚀 Mistral Vibe - AI Coding Assistant
270
 
@@ -275,7 +291,8 @@ with gr.Blocks(fill_height=True) as demo:
275
 
276
  chatbot = gr.Chatbot(
277
  height=600,
278
- label="Chat with Mistral Vibe"
 
279
  )
280
 
281
  with gr.Row():
@@ -341,21 +358,6 @@ with gr.Blocks(fill_height=True) as demo:
341
  # Launch with custom theme and ZeroGPU settings - Gradio 6 syntax
342
  demo.queue() # Enable queue separately in Gradio 6
343
  demo.launch(
344
- theme=custom_theme,
345
- footer_links=[
346
- {
347
- "label": "Built with anycoder",
348
- "url": "https://huggingface.co/spaces/akhaliq/anycoder"
349
- },
350
- {
351
- "label": "Mistral AI",
352
- "url": "https://mistral.ai"
353
- },
354
- {
355
- "label": "Hugging Face ZeroGPU",
356
- "url": "https://huggingface.co/docs/hub/spaces-zerogpu"
357
- }
358
- ],
359
  share=False,
360
  max_threads=4,
361
  show_error=True
 
191
  # Decode and return response
192
  decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
193
 
194
+ # Return in correct Gradio chatbot format
195
+ new_history = history + [
196
+ {"role": "user", "content": message},
197
+ {"role": "assistant", "content": decoded_output}
198
+ ]
199
+ return new_history
200
 
201
  except Exception as e:
202
  error_msg = f"Error processing your request: {str(e)}"
203
+ new_history = history + [
204
+ {"role": "user", "content": message},
205
+ {"role": "assistant", "content": error_msg}
206
+ ]
207
+ return new_history
208
 
209
  # Fallback CPU function for when GPU is not available
210
  def chat_function_cpu(message, history):
 
249
  # Decode and return response
250
  decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]) :])
251
 
252
+ # Return in correct Gradio chatbot format
253
+ new_history = history + [
254
+ {"role": "user", "content": message},
255
+ {"role": "assistant", "content": decoded_output}
256
+ ]
257
+ return new_history
258
 
259
  except Exception as e:
260
  error_msg = f"Error processing your request: {str(e)}"
261
+ new_history = history + [
262
+ {"role": "user", "content": message},
263
+ {"role": "assistant", "content": error_msg}
264
+ ]
265
+ return new_history
266
 
267
  # Create custom theme optimized for ZeroGPU
268
  custom_theme = gr.themes.Soft(
 
280
  )
281
 
282
  # Create Gradio interface with ZeroGPU support - Gradio 6 syntax
283
+ with gr.Blocks(fill_height=True, theme=custom_theme) as demo:
284
  gr.Markdown("""
285
  # 🚀 Mistral Vibe - AI Coding Assistant
286
 
 
291
 
292
  chatbot = gr.Chatbot(
293
  height=600,
294
+ label="Chat with Mistral Vibe",
295
+ type="messages" # Use messages format for Gradio 6
296
  )
297
 
298
  with gr.Row():
 
358
  # Launch with custom theme and ZeroGPU settings - Gradio 6 syntax
359
  demo.queue() # Enable queue separately in Gradio 6
360
  demo.launch(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
361
  share=False,
362
  max_threads=4,
363
  show_error=True