medmekk (HF Staff) committed on
Commit 1369690 · 1 Parent(s): af2d0c0

update builds

Files changed (38)
  1. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc +0 -0
  2. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc +0 -0
  3. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc +0 -0
  4. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc +0 -0
  5. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc +0 -0
  6. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/_ops.py +3 -3
  7. build/torch27-cxx11-cu126-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} +1 -1
  8. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc +0 -0
  9. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc +0 -0
  10. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc +0 -0
  11. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc +0 -0
  12. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc +0 -0
  13. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/_ops.py +3 -3
  14. build/torch27-cxx11-cu128-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} +1 -1
  15. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc +0 -0
  16. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc +0 -0
  17. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc +0 -0
  18. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc +0 -0
  19. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc +0 -0
  20. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/_ops.py +3 -3
  21. build/torch28-cxx11-cu126-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} +1 -1
  22. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc +0 -0
  23. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc +0 -0
  24. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc +0 -0
  25. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc +0 -0
  26. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc +0 -0
  27. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/_ops.py +3 -3
  28. build/torch28-cxx11-cu128-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} +2 -2
  29. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc +0 -0
  30. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc +0 -0
  31. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc +0 -0
  32. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc +0 -0
  33. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc +0 -0
  34. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_ops.py +3 -3
  35. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_sage_attention_44b112f_dirty.abi3.so +0 -3
  36. build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_sage_attention_af2d0c0_dirty.abi3.so +3 -0
  37. nix-build.log +0 -0
  38. torch-ext/torch_binding.h +2 -9
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc differ
 
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _sage_attention_44b112f_dirty
-ops = torch.ops._sage_attention_44b112f_dirty
+from . import _sage_attention_af2d0c0_dirty
+ops = torch.ops._sage_attention_af2d0c0_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_sage_attention_44b112f_dirty::{op_name}"
+    return f"_sage_attention_af2d0c0_dirty::{op_name}"

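The regenerated _ops.py shim is what keeps callers insulated from the revision-stamped name of the native library (now _sage_attention_af2d0c0_dirty). A rough sketch of how the shim is typically consumed, assuming the per-build directory is importable as the sage_attention package; the op name "some_op" is purely hypothetical and only stands in for whatever kernels the extension actually registers:

# Sketch only: importing _ops loads the revision-stamped .so and registers its ops,
# so `_ops.ops` is torch.ops._sage_attention_af2d0c0_dirty.
from sage_attention import _ops

# "some_op" is a placeholder name, not necessarily one this extension provides.
print(_ops.add_op_namespace_prefix("some_op"))
# -> "_sage_attention_af2d0c0_dirty::some_op"

# Dispatch goes through the same handle, e.g. _ops.ops.some_op(...), so no caller
# has to spell out the commit hash baked into the library filename.
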
build/torch27-cxx11-cu126-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b577da1986b76b2571e8dd55412621e6fc85fe1a2f847bc0a5af9851bf388cf2
+oid sha256:83f3b3d1c1371cf577a4e2c2fa3bbeef137aa93a89cf380816c14e650b1449f6
 size 26037568

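Only the LFS pointer changes for the renamed shared object (same byte size, new sha256). Since a Git LFS oid is the SHA-256 of the file contents, a materialized checkout can be sanity-checked with a few lines of Python; this is an illustrative check, not part of the commit, and assumes the repository root as working directory with the binary already fetched via git lfs pull:

import hashlib
import pathlib

# Hash the checked-out binary and compare against the oid in the pointer above.
so_path = pathlib.Path(
    "build/torch27-cxx11-cu126-x86_64-linux/sage_attention/"
    "_sage_attention_af2d0c0_dirty.abi3.so"
)
digest = hashlib.sha256(so_path.read_bytes()).hexdigest()
assert digest == "83f3b3d1c1371cf577a4e2c2fa3bbeef137aa93a89cf380816c14e650b1449f6"
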
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc CHANGED
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc differ
 
build/torch27-cxx11-cu128-x86_64-linux/sage_attention/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _sage_attention_44b112f_dirty
-ops = torch.ops._sage_attention_44b112f_dirty
+from . import _sage_attention_af2d0c0_dirty
+ops = torch.ops._sage_attention_af2d0c0_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_sage_attention_44b112f_dirty::{op_name}"
+    return f"_sage_attention_af2d0c0_dirty::{op_name}"

build/torch27-cxx11-cu128-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d47c952dd9781283ff0dcbd533779de33b0bfa1966dcc0cc8accd0412217c1c5
+oid sha256:871d2abf021f7175f2a66cd9f3599fdd88c9be0c98df1bb4d09f9905d955405f
 size 26553840

build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc differ
 
build/torch28-cxx11-cu126-x86_64-linux/sage_attention/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _sage_attention_44b112f_dirty
-ops = torch.ops._sage_attention_44b112f_dirty
+from . import _sage_attention_af2d0c0_dirty
+ops = torch.ops._sage_attention_af2d0c0_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_sage_attention_44b112f_dirty::{op_name}"
+    return f"_sage_attention_af2d0c0_dirty::{op_name}"

build/torch28-cxx11-cu126-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28e181de0c6388653fb4b8b2d7347f1f547fc84fe7dc45bc66db9b1431d141bc
+oid sha256:26b18ae63bccd4c5926533ffa1d0995e7bf3faf7919c0c55e1b829267ac73afd
 size 26037392

build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc differ
 
build/torch28-cxx11-cu128-x86_64-linux/sage_attention/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _sage_attention_44b112f_dirty
-ops = torch.ops._sage_attention_44b112f_dirty
+from . import _sage_attention_af2d0c0_dirty
+ops = torch.ops._sage_attention_af2d0c0_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_sage_attention_44b112f_dirty::{op_name}"
+    return f"_sage_attention_af2d0c0_dirty::{op_name}"

build/torch28-cxx11-cu128-x86_64-linux/sage_attention/{_sage_attention_44b112f_dirty.abi3.so → _sage_attention_af2d0c0_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:826ab66e6c33b3b2b17c30371934a55e972d560197c5492f4dedf6fcc29f1a1e
-size 26553920
+oid sha256:2681241cb3fee535e10ba52179293982bca60a5fed972404fdec8ae5fa848175
+size 26549824

build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/core.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc CHANGED
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/sage_attention/__pycache__/quant_per_thread.cpython-313.pyc differ
 
build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _sage_attention_44b112f_dirty
-ops = torch.ops._sage_attention_44b112f_dirty
+from . import _sage_attention_af2d0c0_dirty
+ops = torch.ops._sage_attention_af2d0c0_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_sage_attention_44b112f_dirty::{op_name}"
+    return f"_sage_attention_af2d0c0_dirty::{op_name}"

build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_sage_attention_44b112f_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:824faeacc05dc7d676acaa9a005d5f4d7e62f47c361eb58a085f020e21fde29e
-size 26612144

build/torch28-cxx11-cu129-x86_64-linux/sage_attention/_sage_attention_af2d0c0_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff47cafcc3abed4dc02589ee11c315f3b88f65a0510caa89a07825ccd8ea1a48
+size 26608048
nix-build.log CHANGED
The diff for this file is too large to render. See raw diff
 
torch-ext/torch_binding.h CHANGED
@@ -226,10 +226,7 @@ void mean_scale_fuse_quant_cuda(
 void sm_check_89(torch::Tensor x, std::string op_name) {
   int device_index = x.get_device();
   const auto& prop = at::cuda::getDeviceProperties(device_index);
-
-  std::cerr << "sm_check_89: prop->major: " << prop->major << std::endl;
-  std::cerr << "sm_check_89: prop->minor: " << prop->minor << std::endl;
-
+
   if (prop->major < 8 || (prop->major == 8 && prop->minor < 9)) {
     TORCH_CHECK(false, op_name + " requires compute capability 8.9+");
   }
@@ -239,9 +236,6 @@ void sm_check_90(torch::Tensor x, std::string op_name) {
   int device_index = x.get_device();
   const auto& prop = at::cuda::getDeviceProperties(device_index);
 
-  std::cerr << "sm_check_90: prop->major: " << prop->major << std::endl;
-  std::cerr << "sm_check_90: prop->minor: " << prop->minor << std::endl;
-
   if (prop->major < 9) {
     TORCH_CHECK(false, op_name + " requires compute capability 9.0+");
   }
@@ -250,8 +244,7 @@ void sm_check_90(torch::Tensor x, std::string op_name) {
 void sm_check_80(torch::Tensor x, std::string op_name) {
   int device_index = x.get_device();
   const auto& prop = at::cuda::getDeviceProperties(device_index);
-  std::cerr << "sm_check_80: prop->major: " << prop->major << std::endl;
-  std::cerr << "sm_check_80: prop->minor: " << prop->minor << std::endl;
+
   if (prop->major < 8) {
     TORCH_CHECK(false, op_name + " requires compute capability 8.0+");
   }
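
The only change to torch-ext/torch_binding.h is dropping the temporary std::cerr debug prints from the sm_check_* helpers; the capability gates themselves (8.0, 8.9, 9.0) and their TORCH_CHECK failures are untouched. For orientation, a minimal Python-side sketch of the same gating logic, written against the public torch.cuda API rather than this extension (the function name is illustrative only):

import torch

def require_sm89(x: torch.Tensor, op_name: str) -> None:
    # Mirrors sm_check_89: the tensor's CUDA device must report compute
    # capability 8.9 or newer, otherwise refuse to dispatch the kernel.
    major, minor = torch.cuda.get_device_capability(x.device)
    if (major, minor) < (8, 9):
        raise RuntimeError(f"{op_name} requires compute capability 8.9+")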