Update README.md

README.md CHANGED

@@ -17,172 +17,36 @@ tags:
---

-<style type="text/css">
-.tg {border-collapse:collapse;border-spacing:0;margin:0px auto;}
-.tg td{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
-  overflow:hidden;padding:10px 5px;word-break:normal;}
-.tg th{border-color:black;border-style:solid;border-width:1px;font-family:Arial, sans-serif;font-size:14px;
-  font-weight:normal;overflow:hidden;padding:10px 5px;word-break:normal;}
-.tg .tg-baqh{text-align:center;vertical-align:top}
-.tg .tg-0lax{text-align:left;vertical-align:top}
-.tg .tg-amwm{font-weight:bold;text-align:center;vertical-align:top}
-</style>
-<table class="tg"><thead>
-  <tr>
-    <th class="tg-baqh" rowspan="2">AVG</th>
-    <th class="tg-0lax">Ling-mini-2.0</th>
-    <th class="tg-0lax">LLaDA-MoE-7B-A1B-Instruct</th>
-    <th class="tg-0lax">LLaDA2.0-mini-preview</th>
-  </tr>
-  <tr>
-    <th class="tg-amwm">60.67</th>
-    <th class="tg-amwm">52.39</th>
-    <th class="tg-amwm">58.71</th>
-  </tr></thead>
-<tbody>
-  <tr>
-    <td class="tg-amwm" colspan="4">Knowledge</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">MMLU</td>
-    <td class="tg-baqh">78.75</td>
-    <td class="tg-baqh">67.18</td>
-    <td class="tg-baqh">72.49</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">MMLU-PRO</td>
-    <td class="tg-baqh">56.40</td>
-    <td class="tg-baqh">44.64</td>
-    <td class="tg-baqh">49.22</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">GPQA</td>
-    <td class="tg-baqh">37.99</td>
-    <td class="tg-baqh">31.09</td>
-    <td class="tg-baqh">31.82</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">CMMLU</td>
-    <td class="tg-baqh">77.84</td>
-    <td class="tg-baqh">64.30</td>
-    <td class="tg-baqh">67.53</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">C-EVAL</td>
-    <td class="tg-baqh">77.85</td>
-    <td class="tg-baqh">63.93</td>
-    <td class="tg-baqh">66.54</td>
-  </tr>
-  <tr>
-    <td class="tg-amwm" colspan="4">Reasoning</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">squad2.0</td>
-    <td class="tg-baqh">69.14</td>
-    <td class="tg-baqh">86.81</td>
-    <td class="tg-baqh">85.61</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">drop</td>
-    <td class="tg-baqh">76.35</td>
-    <td class="tg-baqh">79.77</td>
-    <td class="tg-baqh">79.49</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">korbench</td>
-    <td class="tg-baqh">51.04</td>
-    <td class="tg-baqh">38.40</td>
-    <td class="tg-baqh">37.26</td>
-  </tr>
-  <tr>
-    <td class="tg-amwm" colspan="4">Coding</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">CruxEval-O</td>
-    <td class="tg-baqh">71.12</td>
-    <td class="tg-baqh">42.38</td>
-    <td class="tg-baqh">61.88</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">mbpp</td>
-    <td class="tg-baqh">81.03</td>
-    <td class="tg-baqh">70.02</td>
-    <td class="tg-baqh">77.75</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">MultiPL-E</td>
-    <td class="tg-baqh">62.23</td>
-    <td class="tg-baqh">52.53</td>
-    <td class="tg-baqh">62.43</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">humaneval</td>
-    <td class="tg-baqh">77.44</td>
-    <td class="tg-baqh">61.59</td>
-    <td class="tg-baqh">80.49</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">livecodebench_v6</td>
-    <td class="tg-baqh">30.18</td>
-    <td class="tg-baqh">13.27</td>
-    <td class="tg-baqh">19.93</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">Bigcodebench-Full</td>
-    <td class="tg-baqh">35.88</td>
-    <td class="tg-baqh">20.44</td>
-    <td class="tg-baqh">30.44</td>
-  </tr>
-  <tr>
-    <td class="tg-amwm" colspan="4">Math</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">GSM8K</td>
-    <td class="tg-baqh">91.58</td>
-    <td class="tg-baqh">82.41</td>
-    <td class="tg-baqh">89.01</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">math</td>
-    <td class="tg-baqh">82.22</td>
-    <td class="tg-baqh">58.68</td>
-    <td class="tg-baqh">73.50</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">OlympiadBench</td>
-    <td class="tg-baqh">49.93</td>
-    <td class="tg-baqh">21.04</td>
-    <td class="tg-baqh">36.67</td>
-  </tr>
-  <tr>
-    <td class="tg-amwm" colspan="4">Agent &amp; Alignment</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">BFCL_Live</td>
-    <td class="tg-baqh">45.74</td>
-    <td class="tg-baqh">63.09</td>
-    <td class="tg-baqh">74.11</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">IFEval-strict -prompt</td>
-    <td class="tg-baqh">69.13</td>
-    <td class="tg-baqh">59.33</td>
-    <td class="tg-baqh">62.50</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">SyllogEval<sup>*</sup></td>
-    <td class="tg-baqh">33.28</td>
-    <td class="tg-baqh">64.22</td>
-    <td class="tg-baqh">47.34</td>
-  </tr>
-  <tr>
-    <td class="tg-0lax">IXRB<sup>*</sup></td>
-    <td class="tg-baqh">19.00</td>
-    <td class="tg-baqh">15.00</td>
-    <td class="tg-baqh">27.00</td>
-  </tr>
-</tbody></table>
+| Benchmark | Ling-mini-2.0 | LLaDA-MoE-7B-A1B-Instruct | LLaDA2.0-mini-preview |
+| :------------------------------ | :-------------: | :-------------------------: | :---------------------: |
+| **Average** | 60.67 | 52.39 | 58.71 |
+| **Knowledge** | | | |
+| MMLU | 78.75 | 67.18 | 72.49 |
+| MMLU-PRO | 56.40 | 44.64 | 49.22 |
+| GPQA | 37.99 | 31.09 | 31.82 |
+| CMMLU | 77.84 | 64.30 | 67.53 |
+| C-EVAL | 77.85 | 63.93 | 66.54 |
+| **Reasoning** | | | |
+| squad2.0 | 69.14 | 86.81 | 85.61 |
+| drop | 76.35 | 79.77 | 79.49 |
+| korbench | 51.04 | 38.40 | 37.26 |
+| **Coding** | | | |
+| CruxEval-O | 71.12 | 42.38 | 61.88 |
+| mbpp | 81.03 | 70.02 | 77.75 |
+| MultiPL-E | 62.23 | 52.53 | 62.43 |
+| humaneval | 77.44 | 61.59 | 80.49 |
+| livecodebench_v6 | 30.18 | 13.27 | 19.93 |
+| Bigcodebench-Full | 35.88 | 20.44 | 30.44 |
+| **Math** | | | |
+| GSM8K | 91.58 | 82.41 | 89.01 |
+| math | 82.22 | 58.68 | 73.50 |
+| OlympiadBench | 49.93 | 21.04 | 36.67 |
+| **Agent & Alignment** | | | |
+| BFCL_Live | 45.74 | 63.09 | 74.11 |
+| IFEval-strict -prompt | 69.13 | 59.33 | 62.50 |
+| SyllogEval<sup>*</sup> | 33.28 | 64.22 | 47.34 |
+| IXRB<sup>*</sup> | 19.00 | 15.00 | 27.00 |

**SyllogEval**<sup>*</sup> is a logic benchmark designed to evaluate the formal reasoning capabilities of Large Language Models (LLMs).