AdamF92 committed
Commit 1df071f · verified · 1 parent: f5dfa39

Update app.py

Files changed (1):
  1. app.py +79 -58
app.py CHANGED
@@ -1,70 +1,91 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-     messages.extend(history)
-     messages.append({"role": "user", "content": message})
-
      response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()

  if __name__ == "__main__":
-     demo.launch()
 
 
+ # app.py
+ import os
  import gradio as gr
+ import torch
+ import spaces
+ from rxlm.rxt.models import RxTBeta
+ from rxlm.llm.models import DecoderOnlyTransformer
+ from rxlm.training.tokenizer import load_tokenizer_from_hf_hub
+
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+
+ # Reactive Transformer (stateful) model and its tokenizer
+ tokenizer = load_tokenizer_from_hf_hub('ReactiveAI/RxT-Beta-Micro', token=HF_TOKEN)
+ model = RxTBeta.from_pretrained('ReactiveAI/RxT-Beta-Micro-Supervised', token=HF_TOKEN, tokenizer=tokenizer)
+ model.share_components()
+
+ # Stateless reference LLM and its tokenizer
+ llm_tokenizer = load_tokenizer_from_hf_hub('ReactiveAI/rc-RxT-Beta-Base', token=HF_TOKEN)
+ llm_model = DecoderOnlyTransformer.from_pretrained('ReactiveAI/SQA-Transformer-Beta-SFT', token=HF_TOKEN)
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device)
+ llm_model.to(device)
+
+ # Snapshot of the freshly initialized short-term memory (STM), used to reset sessions
+ initial_stm = model.export_stm_state().cpu()
+
+ seq_len = 1024
+ llm_seq_len = 4096
+
+ @spaces.GPU
+ def chat(message: str, history: list, stm_state: torch.Tensor, llm_history: list, temperature: float, top_p: float):
+     tokenized_query = model.tokenize_query(message, max_seq_len=seq_len, device=device)
+
+     # Restore this session's short-term memory before interacting
+     model.load_stm_state(stm_state)
+
      response = ""
+     llm_response = ""
+
+     # Stream the Reactive Transformer's answer token by token
+     with torch.amp.autocast(device_type=device.type, dtype=torch.bfloat16):
+         for token_id in model.interact(**tokenized_query, max_seq_len=seq_len, temperature=temperature, top_p=top_p):
+             response += model.stringify_token(token_id, show_memory_update=True)
+             yield history + [[message, response]], stm_state, llm_history
+
+     # The stateless reference LLM needs the full chat history replayed on every turn
+     llm_chat_history = llm_model.tokenize_chat_template(llm_tokenizer, llm_history, message, max_seq_len=llm_seq_len, use_simplified_format=True)
+
+     with torch.amp.autocast(device_type=device.type, dtype=torch.bfloat16):
+         for token_id in llm_model.generate(**llm_chat_history, max_seq_len=llm_seq_len, temperature=temperature, top_p=top_p):
+             llm_response += model.stringify_token(token_id, show_memory_update=False)
+             yield history + [[message, response]], stm_state, llm_history + [[message, llm_response]]
+
+     # Final output hands the updated STM back to the session state
+     return history + [[message, response]], model.export_stm_state().cpu(), llm_history + [[message, llm_response]]
+
+ with gr.Blocks(title="RxT-Beta-Micro-AI 270M (Supervised) Demo") as demo:
+     gr.Markdown("""
+     # RxT-Beta-Micro-Supervised 290M vs Stateless LLM Reference 275M
+     Compare the experimental Reactive Transformer with a stateless LLM reference, both trained on the same limited 10B-token dataset.
+
+     ## Limitations
+     The supervised version of the model is still at an intermediate stage and will be further improved
+     in the Reinforcement Learning stages (the demo will be updated continuously), so the model may generate
+     inaccurate answers and its memory retention is weak. However, it should still demonstrate the architecture's
+     advantages, especially infinite context and no delays (small delays are caused by Spaces ZeroGPU allocation).
+     """)
+
+     with gr.Row():
+         chatbot = gr.Chatbot(height=600, type='tuples')
+         llm_chatbot = gr.Chatbot(height=600, type='tuples')
+
+     with gr.Row():
+         msg = gr.Textbox(placeholder="Ask Models...", label="Query", scale=4)
+         send_btn = gr.Button("Send", scale=1)
+         clear = gr.Button("Clear & Reset STM", scale=1)
+
+     with gr.Row():
+         temp = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
+         top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
+
+     # Per-session short-term memory tensor
+     stm_state = gr.State(initial_stm.clone())
+
+     msg.submit(chat, [msg, chatbot, stm_state, llm_chatbot, temp, top_p], [chatbot, stm_state, llm_chatbot], queue=True).then(
+         lambda: gr.update(value=""), outputs=msg
+     )
+
+     send_btn.click(chat, [msg, chatbot, stm_state, llm_chatbot, temp, top_p], [chatbot, stm_state, llm_chatbot], queue=True).then(
+         lambda: gr.update(value=""), outputs=msg
+     )
+
+     clear.click(lambda: ([], [], initial_stm.clone()), None, [chatbot, llm_chatbot, stm_state])

  if __name__ == "__main__":
+     demo.queue()
+     demo.launch()
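
For reference, the core pattern this commit introduces is the per-turn short-term-memory (STM) round-trip that the app stores in gr.State. Below is a minimal console sketch of that same loop, assuming `model` and `device` are initialized exactly as in the app.py above; all rxlm calls are the ones already used in the diff, the file name and hard-coded sampling values are illustrative only, and this is not part of the commit.

# stm_loop.py — hedged sketch of the STM round-trip, not part of the commit.
# Assumes `model` (RxTBeta) and `device` are set up as in app.py above.
stm = model.export_stm_state().cpu()          # fresh session memory

while True:
    query = input("You: ")
    if not query:
        break
    model.load_stm_state(stm)                 # restore this session's memory
    tokens = model.tokenize_query(query, max_seq_len=1024, device=device)
    answer = ""
    for token_id in model.interact(**tokens, max_seq_len=1024, temperature=0.7, top_p=0.9):
        answer += model.stringify_token(token_id, show_memory_update=False)
    print("RxT:", answer)
    stm = model.export_stm_state().cpu()      # persist updated memory for the next turn

Because the conversation state lives in a fixed-size STM tensor rather than in a growing token history, the per-turn cost stays constant no matter how long the session runs, which is the property the demo contrasts against the stateless reference LLM.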